diff --git a/.coveragerc b/.coveragerc
new file mode 100644
index 0000000000..ed0e808a0a
--- /dev/null
+++ b/.coveragerc
@@ -0,0 +1,5 @@
+[run]
+branch = True
+
+omit = setup.py, tests/*
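+
+# Sketch of a typical invocation with this configuration (assumes the
+# coverage package and nose2 are installed):
+#   coverage run -m nose2
+#   coverage report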
+
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000..e04f8db211
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,28 @@
+*.pyc
+*.egg-info
+dist
+build
+.coverage
+htmlcov
+nose2-junit.xml
+_ECDSARecoverModule.pyd
+_ECDSARecoverModule.so
+_poet_enclave_simulator.pyd
+_poet_enclave_simulator.so
+deps/
+
+journal/consensus/poet/poet_enclave_simulator/build
+journal/consensus/poet/poet_enclave_simulator/_*
+journal/consensus/poet/poet_enclave_simulator/*.so
+journal/consensus/poet/poet_enclave_simulator/*.pyd
+journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.py
+journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator_wrap.cpp
+
+gossip/ECDSA/build
+gossip/ECDSA/*.so
+gossip/ECDSA/*.pyd
+gossip/ECDSA/_*
+gossip/ECDSA/ECDSARecoverModule.py
+gossip/ECDSA/ECDSARecoverModule.pyc
+gossip/ECDSA/ECDSARecoverModule_wrap.cpp
+gossip/ECDSA/ECDSAPubKeyRecovery.*
\ No newline at end of file
diff --git a/.pep8 b/.pep8
new file mode 100644
index 0000000000..f253a2378c
--- /dev/null
+++ b/.pep8
@@ -0,0 +1,3 @@
+[pep8]
+ignore=W503
+exclude=build,doc,ECDSARecoverModule.py,EnclaveModule.py,poet_enclave_simulator.py
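+
+# Sketch of a manual invocation with this configuration (assumes the pep8
+# tool is installed; paths follow the repository layout):
+#   pep8 --config=.pep8 gossip journal ledger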
diff --git a/.pep8-enforced b/.pep8-enforced
new file mode 100644
index 0000000000..f253a2378c
--- /dev/null
+++ b/.pep8-enforced
@@ -0,0 +1,3 @@
+[pep8]
+ignore=W503
+exclude=build,doc,ECDSARecoverModule.py,EnclaveModule.py,poet_enclave_simulator.py
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000000..c18768bf59
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,428 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=import-star-module-level,
+ old-octal-literal,
+ oct-method,
+ print-statement,
+ unpacking-in-except,
+ parameter-unpacking,
+ backtick,
+ old-raise-syntax,
+ old-ne-operator,
+ long-suffix,
+ dict-view-method,
+ dict-iter-method,
+ metaclass-assignment,
+ next-method-called,
+ raising-string,
+ indexing-exception,
+ raw_input-builtin,
+ long-builtin,
+ file-builtin,
+ execfile-builtin,
+ coerce-builtin,
+ cmp-builtin,
+ buffer-builtin,
+ basestring-builtin,
+ apply-builtin,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ useless-suppression,
+ range-builtin-not-iterating,
+ suppressed-message,
+ no-absolute-import,
+ old-division,
+ cmp-method,
+ reload-builtin,
+ zip-builtin-not-iterating,
+ intern-builtin,
+ unichr-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ input-builtin,
+ round-builtin,
+ hex-method,
+ nonzero-method,
+ map-builtin-not-iterating,
+ bad-continuation,
+ too-many-lines
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention
+# and statement, which respectively contain the number of messages in each
+# category and the total number of statements analyzed. This is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set). This supports
+# qualified names.
+ignored-classes=
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+
+[BASIC]
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,30}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
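+
+# Sketch of a manual invocation with this configuration (assumes pylint is
+# installed; package names follow the repository layout):
+#   pylint --rcfile=.pylintrc gossip journal ledger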
diff --git a/.pylintrc-enforced b/.pylintrc-enforced
new file mode 100644
index 0000000000..3338f8e41d
--- /dev/null
+++ b/.pylintrc-enforced
@@ -0,0 +1,460 @@
+[MASTER]
+
+# Specify a configuration file.
+#rcfile=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=
+
+# Use multiple processes to speed up Pylint.
+jobs=1
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code
+extension-pkg-whitelist=
+
+# Allow optimization of some AST trees. This will activate a peephole AST
+# optimizer, which will apply various small optimizations. For instance, it can
+# be used to obtain the result of joining multiple strings with the addition
+# operator. Joining a lot of strings can lead to a maximum recursion error in
+# Pylint and this flag can prevent that. It has one side effect, the resulting
+# AST will be different than the one from reality.
+optimize-ast=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
+confidence=
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifier separated by comma (,) or put this option
+# multiple time. See also the "--disable" option for examples.
+#enable=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then reenable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W"
+disable=import-star-module-level,
+ old-octal-literal,
+ oct-method,
+ print-statement,
+ unpacking-in-except,
+ parameter-unpacking,
+ backtick,
+ old-raise-syntax,
+ old-ne-operator,
+ long-suffix,
+ dict-view-method,
+ dict-iter-method,
+ metaclass-assignment,
+ next-method-called,
+ raising-string,
+ indexing-exception,
+ raw_input-builtin,
+ long-builtin,
+ file-builtin,
+ execfile-builtin,
+ coerce-builtin,
+ cmp-builtin,
+ buffer-builtin,
+ basestring-builtin,
+ apply-builtin,
+ filter-builtin-not-iterating,
+ using-cmp-argument,
+ useless-suppression,
+ range-builtin-not-iterating,
+ suppressed-message,
+ no-absolute-import,
+ old-division,
+ cmp-method,
+ reload-builtin,
+ zip-builtin-not-iterating,
+ intern-builtin,
+ unichr-builtin,
+ reduce-builtin,
+ standarderror-builtin,
+ unicode-builtin,
+ xrange-builtin,
+ coerce-method,
+ delslice-method,
+ getslice-method,
+ setslice-method,
+ input-builtin,
+ round-builtin,
+ hex-method,
+ nonzero-method,
+ map-builtin-not-iterating,
+ no-member,
+ missing-docstring,
+ multiple-statements,
+ dangerous-default-value,
+ bare-except,
+ unused-argument,
+ superfluous-parens,
+ duplicate-code,
+ bad-continuation,
+ too-many-instance-attributes,
+ attribute-defined-outside-init,
+ unused-variable,
+ undefined-variable,
+ too-few-public-methods,
+ redefined-outer-name,
+ redefined-builtin,
+ deprecated-lambda,
+ bad-builtin,
+ protected-access,
+ unnecessary-lambda,
+ empty-docstring,
+ broad-except,
+ old-style-class,
+ no-init,
+ super-init-not-called,
+ ungrouped-imports,
+ no-self-use,
+ global-variable-not-assigned,
+ too-many-return-statements,
+ too-many-branches,
+ too-many-arguments,
+ redefined-variable-type,
+ global-statement,
+ too-many-lines
+
+[REPORTS]
+
+# Set the output format. Available formats are text, parseable, colorized, msvs
+# (visual studio) and html. You can also give a reporter class, eg
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Put messages in a separate file for each module / package specified on the
+# command line instead of printing them on stdout. Reports (if any) will be
+# written in a file name "pylint_global.[txt|html]".
+files-output=no
+
+# Tells whether to display a full report or only the messages
+reports=yes
+
+# Python expression which should return a note less than 10 (10 is the highest
+# note). You have access to the variables error, warning, refactor, convention
+# and statement, which respectively contain the number of messages in each
+# category and the total number of statements analyzed. This is used by the
+# global evaluation report (RP0004).
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details
+#msg-template=
+
+
+[SPELLING]
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to indicated private dictionary in
+# --spelling-private-dict-file option instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[LOGGING]
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format
+logging-modules=logging
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take in consideration, separated by a comma.
+notes=FIXME,XXX,TODO
+
+
+[FORMAT]
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+# List of optional constructs for which whitespace checking is disabled. `dict-
+# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
+# `trailing-comma` allows a space between comma and closing bracket: (a, ).
+# `empty-line` allows space-only lines.
+no-space-check=trailing-comma,dict-separator
+
+# Maximum number of lines in a module
+max-module-lines=1000
+
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
+# tab).
+indent-string=' '
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+
+[SIMILARITIES]
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=no
+
+
+[VARIABLES]
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,_cb
+
+
+[TYPECHECK]
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# List of class names for which member attributes should not be checked
+# (useful for classes with attributes dynamically set). This supports
+# qualified names.
+ignored-classes=
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+
+[BASIC]
+
+# List of builtins function names that should not be used, separated by a comma
+bad-functions=map,filter,input
+
+# Good variable names which should always be accepted, separated by a comma
+good-names=i,j,k,ex,Run,_
+
+# Bad variable names which should always be refused, separated by a comma
+bad-names=foo,bar,baz,toto,tutu,tata
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Include a hint for the correct naming format with invalid-name
+include-naming-hint=no
+
+# Regular expression matching correct function names
+function-rgx=[a-z_][a-z0-9_]{2,40}$
+
+# Naming hint for function names
+function-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct variable names
+variable-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$
+
+# Naming hint for variable names
+variable-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct constant names
+const-rgx=(([a-zA-Z_][a-zA-Z0-9_]*)|(__.*__))$
+
+# Naming hint for constant names
+const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+
+# Regular expression matching correct attribute names
+attr-rgx=[A-Za-z_][A-Za-z0-9_]{2,30}$
+
+# Naming hint for attribute names
+attr-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct argument names
+argument-rgx=[A-Za-z_][A-Za-z0-9_]{0,30}$
+
+# Naming hint for argument names
+argument-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression matching correct class attribute names
+class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{0,30}|(__.*__))$
+
+# Naming hint for class attribute names
+class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
+
+# Regular expression matching correct inline iteration names
+inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+
+# Naming hint for inline iteration names
+inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
+
+# Regular expression matching correct class names
+class-rgx=[A-Z_][a-zA-Z0-9]+$
+
+# Naming hint for class names
+class-name-hint=[A-Z_][a-zA-Z0-9]+$
+
+# Regular expression matching correct module names
+module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Naming hint for module names
+module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
+
+# Regular expression matching correct method names
+method-rgx=[a-z_][a-z0-9_]{2,50}$
+
+# Naming hint for method names
+method-name-hint=[a-z_][a-z0-9_]{2,30}$
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method
+max-args=5
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore
+ignored-argument-names=_.*
+
+# Maximum number of locals for function / method body
+max-locals=15
+
+# Maximum number of return / yield for function / method body
+max-returns=6
+
+# Maximum number of branch for function / method body
+max-branches=12
+
+# Maximum number of statements in function / method body
+max-statements=50
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of boolean expressions in an if statement
+max-bool-expr=5
+
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=mcs
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,_fields,_replace,_source,_make
+
+
+[IMPORTS]
+
+# Deprecated modules which should not be used, separated by a comma
+deprecated-modules=regsub,TERMIOS,Bastion,rexec
+
+# Create a graph of all (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled)
+import-graph=
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled)
+ext-import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled)
+int-import-graph=
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "Exception"
+overgeneral-exceptions=Exception
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000..2481cfa544
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,141 @@
+# Contributing to Distributed Ledger
+
+This document covers how to report issues and contribute code.
+
+## Topics
+
+* [Reporting Issues](#reporting-issues)
+* [Contributing Code](#contributing-code)
+
+# Reporting Issues
+
+This is a great way to contribute. Before reporting an issue, please review current
+open issues to see if there are any matches. If there is a match, comment with a +1, or "Also seeing this issue".
+If any environment details differ, please add those with your comment to the matching issue.
+
+When reporting an issue, details are key. Include the following:
+- OS version
+- Distributed Ledger version
+- Environment details (virtual, physical, etc.)
+- Steps to reproduce
+- Actual results
+- Expected results
+
+## Notes on GitHub Usage
+It's worth noting that we don't use all of GitHub's native issue-management features. For instance, it's uncommon
+for us to assign issues to the developers who will address them. Here are notes on what we do use.
+
+### Issue Labels
+Distributed Ledger maintainers use a set of labels to keep issues organized:
+
+* **bug** - the classic definition of missing or misbehaving code from existing functionality (this includes malfunctioning tests)
+* **feature request** - any new functionality or improvements/enhancements to existing functionality. Note that we use a
+ single label for this (instead of separate feature & enhancement labels) since both are prioritized identically during sprint planning
+* **question** - discussions related to Distributed Ledger, its administration, or other details that do not outline how to address the request
+* **RFC** - short for [request for comment](https://en.wikipedia.org/wiki/Request_for_Comments). These are discussions of
+ Distributed Ledger feature requests that include detailed implementation opinions up for discussion
+
+We also use contextual labels to provide more information regarding an issue:
+
+ * **in progress** - we're taking action (right now). It's best not to develop your own solution to an issue in this state. Comments are welcome
+ * **help wanted** - A useful flag to show this issue would benefit from community support. Please comment or, if it's not in progress, say you'd like to take on the request
+ * **on hold** - An idea that gained momentum but has not yet been put into a maintainer's queue to complete. Used to inform any trackers of this status
+ * **tracked** - This issue is in the JIRA backlog for the team working on Distributed Ledger
+ * **duplicate** - Used to tag issues which are identical to other issues _OR_ which are resolved by the same fix as another issue
+ * **wontfix** - The universal sign that we won't fix this issue. This tag is important to use as we separate out the nice-to-have
+ features from our strategic direction
+
+# Contributing Code
+
+Distributed Ledger is Apache 2.0 licensed and accepts contributions via GitHub pull requests.
+
+Before contributing any code, note that you will be asked to sign off on the
+[Developer Certificate of Origin](http://developercertificate.org/).
+Please review the document and ensure you can sign off on it.
+
+Fork the repository and make your changes in a feature branch. Please prefix the branch name with the
+GitHub issue number (XXXX-).
+
+Please include unit and integration test changes.
+
+Please ensure the unit and integration tests run successfully. Both are run with `nose2`,
+ but integration tests are only run if the environment variable ENABLE_INTEGRATION_TESTS is set.
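+
+For example, a typical local run (assuming `nose2` is installed) might look like:
+
+```
+nose2                               # unit tests only
+ENABLE_INTEGRATION_TESTS=1 nose2    # unit and integration tests
+```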
+
+### Commit Guidelines
+
+Commits should have logical groupings. A bug fix should be a single commit. A new feature
+should be a single commit.
+
+Commit messages should be clear on what is being fixed or added to the code base. If a
+commit is addressing an open issue, please start the commit message with "Fix: #XXX" or
+"Feature: #XXX". This helps make the generated changelog for each release easy to read,
+showing which commits were fixes and which were features.
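+
+For example, a commit subject following this convention (the issue number and
+summary here are purely illustrative) might be:
+
+```
+Fix: #XXX handle empty transaction blocks in the journal
+```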
+
+### Pull Request Guidelines
+
+Pull requests can contain a single commit or multiple commits. The most important part is that _**a single commit maps to a single fix**_. Here are a few scenarios:
+* If a pull request adds a feature but also fixes two bugs, then the pull request should have three commits, one commit each for the feature and two bug fixes
+* If a PR is opened with 5 commits that was work involved to fix a single issue, it should be rebased to a single commit
+* If a PR is opened with 5 commits, with the first three to fix one issue and the second two to fix a separate issue, then it should be rebased to two commits, one for each issue
+
+Your pull request should be rebased against the current master branch. Please do not merge
+the current master branch in with your topic branch, nor use the Update Branch button provided
+by GitHub on the pull request page.
+
+### Sign your work
+
+**Please ensure your commit messages end with the "Signed-off-by:" tag followed
+ by your name and email address to certify the origin of the contribution. Do not use pseudonyms.**
+ (Please see the -s and --signoff flags on [git commit](https://git-scm.com/docs/git-commit))
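+
+For example:
+
+```
+git commit -s -m "Fix: #XXX short description of the change"
+```
+
+The `-s` flag appends a `Signed-off-by:` line using the name and email from your
+git configuration. The Developer Certificate of Origin reads: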
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+ have the right to submit it under the open source license
+ indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+ of my knowledge, is covered under an appropriate open source
+ license and I have the right under that license to submit that
+ work with modifications, whether created in whole or in part
+ by me, under the same open source license (unless I am
+ permitted to submit under a different license), as indicated
+ in the file; or
+
+(c) The contribution was provided directly to me by some other
+ person who certified (a), (b) or (c) and I have not modified
+ it.
+
+(d) I understand and agree that this project and the contribution
+ are public and that a record of the contribution (including all
+ personal information I submit with it, including my sign-off) is
+ maintained indefinitely and may be redistributed consistent with
+ this project or the open source license(s) involved.
+```
+
+### Merge Approval
+
+The maintainers of the repo utilize a "Looks Good To Me" (LGTM) message in the pull request.
+After one or more maintainers state LGTM, we will merge. If there are questions or comments on your code,
+feel free to address these in your branch through new commits.
+
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000..94ab15933d
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2016 Intel Corporation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000000..45b4107f5b
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,4 @@
+recursive-include gossip *.py
+recursive-include journal *.py
+recursive-include ledger *.py
+include journal/consensus/poet/poet_enclave_simulator/*
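+
+# The patterns above control which files are bundled into the source
+# distribution; a sketch of the packaging command (assumes setuptools):
+#   python setup.py sdist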
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000..929613492d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,15 @@
+## Getting Started
+For an overview of the Distributed Ledger, a quickstart guide and API documentation,
+please go to: [Distributed Ledger Documentation](http://intelledger.github.io/index.html).
+
+## Security Notice:
+This project includes a consensus algorithm, PoET (Proof of Elapsed Time),
+designed to run in a secure enclave like
+[Intel® Software Guard Extensions](https://software.intel.com/en-us/isa-extensions/intel-sgx).
+The version included in this project is intended to provide the same
+functional characteristics, but runs **unprotected**. It does **not**
+provide security in this mode. This project is intended for experimental usage.
+Do not use this project for security sensitive applications.
+
+## Contents
+This repository contains the fundamental classes used in the distributed ledger system.
\ No newline at end of file
diff --git a/bin/run_lint b/bin/run_lint
new file mode 100755
index 0000000000..e0289f3787
--- /dev/null
+++ b/bin/run_lint
@@ -0,0 +1,23 @@
+#!/bin/bash
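+# Run pep8 and pylint (via "setup.py lint") over every top-level Python
+# package and the tests directory, using the enforced configuration files.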
+
+set -e
+
+directories="$(find . -maxdepth 2 -name __init__.py | awk -F/ '{print $2}') tests"
+
+files=""
+for dir in $directories
+do
+ for file in $(find $dir -name \*.py)
+ do
+ [ $file = "gossip/ECDSA/ECDSARecoverModule.py" ] && continue
+ [ $file = "journal/consensus/poet/Enclave/EnclaveModule.py" ] && continue
+ [ $file = "journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.py" ] && continue
+ files="$files $file"
+ done
+done
+
+pep8 --config=.pep8-enforced $files
+python setup.py -q lint \
+ --lint-rcfile=.pylintrc-enforced \
+ --lint-reports=no \
+ --lint-packages=$(echo $files | sed -e 's/ /,/g')
diff --git a/doc/.gitignore b/doc/.gitignore
new file mode 100644
index 0000000000..f64d621e4e
--- /dev/null
+++ b/doc/.gitignore
@@ -0,0 +1,2 @@
+build
+_*
diff --git a/doc/Communication.rst b/doc/Communication.rst
new file mode 100644
index 0000000000..bb8280ad01
--- /dev/null
+++ b/doc/Communication.rst
@@ -0,0 +1,24 @@
+=================================================================
+Communication Layer
+=================================================================
+
+The Communication Layer facilitates communication among a collection of
+Nodes through gossip protocols.
+
+Directly connected peers are identified by host/port information, while other
+peers are identified by an address derived from an ECDSA verifying key.
+Connection requests must be accepted before packets are processed.
+
+The Communication Layer provides:
+
+* Rudimentary flow control between peers
+* Reliable delivery
+* Limited distribution
+
+.. autoclass:: gossip.Gossip.Gossip
+ :members: AddNode, DropNode, RegisterMessageHandler,
+ ClearMessageHandler, GetMessageHandler, SendMessage,
+ ForwardMessage, BroadcastMessage
+
diff --git a/doc/EndPointRegistry.rst b/doc/EndPointRegistry.rst
new file mode 100644
index 0000000000..f60166bc2a
--- /dev/null
+++ b/doc/EndPointRegistry.rst
@@ -0,0 +1,3 @@
+-----------------------------------------------------------------
+Endpoint Registry Transaction Family
+-----------------------------------------------------------------
diff --git a/doc/IntegerKey.rst b/doc/IntegerKey.rst
new file mode 100644
index 0000000000..a5b0ea2c53
--- /dev/null
+++ b/doc/IntegerKey.rst
@@ -0,0 +1,3 @@
+-----------------------------------------------------------------
+IntegerKey Transaction Family
+-----------------------------------------------------------------
diff --git a/doc/Journal.rst b/doc/Journal.rst
new file mode 100644
index 0000000000..eb58e118b5
--- /dev/null
+++ b/doc/Journal.rst
@@ -0,0 +1,5 @@
+=================================================================
+Journal
+=================================================================
+
+A Journal maintains an ordered list of blocks, each of which contains an ordered list of transactions.
diff --git a/doc/Ledger.rst b/doc/Ledger.rst
new file mode 100644
index 0000000000..4ae6cfe0f9
--- /dev/null
+++ b/doc/Ledger.rst
@@ -0,0 +1,12 @@
+=================================================================
+Ledgers
+=================================================================
+
+A Ledger is defined by a database specification, transaction types that
+modify the database, and rules that determine which transactions are
+acceptable.
+
+.. toctree::
+
+ EndPointRegistry
+ IntegerKey
diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000000..07576458f7
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,196 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SPHINXAPIDOC = sphinx-apidoc
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
+
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+clean:
+ rm -rf $(BUILDDIR)/*
+
+apidoc:
+ $(SPHINXAPIDOC) -o code -M -f -e -H Sawtooth ../ ../setup.py
+
+html: apidoc
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+dirhtml: apidoc
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+singlehtml: apidoc
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+pickle: apidoc
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+json: apidoc
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+htmlhelp: apidoc
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+qthelp: apidoc
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/SawtoothLakeDigitalLedger.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/SawtoothLakeDigitalLedger.qhc"
+
+applehelp: apidoc
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+devhelp: apidoc
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/SawtoothLakeDigitalLedger"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/SawtoothLakeDigitalLedger"
+ @echo "# devhelp"
+
+epub: apidoc
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+latex: apidoc
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+latexpdf: apidoc
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+latexpdfja: apidoc
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+text: apidoc
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+man: apidoc
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+texinfo: apidoc
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+info: apidoc
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+gettext: apidoc
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+changes: apidoc
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+linkcheck: apidoc
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+doctest: apidoc
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+coverage: apidoc
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+xml: apidoc
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+pseudoxml: apidoc
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/doc/conf.py b/doc/conf.py
new file mode 100644
index 0000000000..8631cc7983
--- /dev/null
+++ b/doc/conf.py
@@ -0,0 +1,296 @@
+# -*- coding: utf-8 -*-
+#
+# Sawtooth Lake Digital Ledger documentation build configuration file, created by
+# sphinx-quickstart on Thu Sep 3 14:03:57 2015.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.viewcode',
+ 'sphinx.ext.napoleon'
+]
+
+# Autodoc settings
+autodoc_member_order = 'bysource'
+autoclass_content = 'both'
+
+# Napoleon settings
+napoleon_use_ivar = True
+napoleon_include_special_with_doc = True
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = u'Sawtooth Lake Digital Ledger'
+copyright = u'2016, Intel Corporation'
+author = u'Intel Corporation'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = '0.1'
+# The full version, including alpha/beta/rc tags.
+release = '0.1'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'SawtoothLakeDigitalLedgerdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'SawtoothLakeDigitalLedger.tex', u'Sawtooth Lake Digital Ledger Documentation',
+ u'Intel Corporation', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'sawtoothlakedigitalledger', u'Sawtooth Lake Digital Ledger Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'SawtoothLakeDigitalLedger', u'Sawtooth Lake Digital Ledger Documentation',
+ author, 'SawtoothLakeDigitalLedger', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000000..dbd2dae86b
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,29 @@
+.. Sawtooth Lake Digital Ledger documentation master file, created by
+ sphinx-quickstart on Thu Sep 3 14:03:57 2015.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Sawtooth Lake Decentralized Ledger
+========================================================
+
+Sawtooth Lake is a software platform for building decentralized ledgers
+comparable to the blockchain ledger that underlies Bitcoin.
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ Communication
+ Journal
+ Ledger
+ code/modules
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/doc/make.bat b/doc/make.bat
new file mode 100644
index 0000000000..6f1e7218f9
--- /dev/null
+++ b/doc/make.bat
@@ -0,0 +1,263 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. xml to make Docutils-native XML files
+ echo. pseudoxml to make pseudoxml-XML files for display purposes
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ echo. coverage to run coverage check of the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+
+REM Check if sphinx-build is available and fall back to the Python module if not
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 goto sphinx_python
+goto sphinx_ok
+
+:sphinx_python
+
+set SPHINXBUILD=python -m sphinx.__init__
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+:sphinx_ok
+
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\SawtoothLakeDigitalLedger.qhcp
+ echo.To view the help file:
+ echo.^> assistant -collectionFile %BUILDDIR%\qthelp\SawtoothLakeDigitalLedger.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdf" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdfja" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf-ja
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+if "%1" == "coverage" (
+ %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of coverage in the sources finished, look at the ^
+results in %BUILDDIR%/coverage/python.txt.
+ goto end
+)
+
+if "%1" == "xml" (
+ %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The XML files are in %BUILDDIR%/xml.
+ goto end
+)
+
+if "%1" == "pseudoxml" (
+ %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+ goto end
+)
+
+:end
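
The fallback branch above re-invokes Sphinx through the interpreter when `sphinx-build` is not on PATH. For orientation, a rough Python equivalent of what the `html` target runs under that fallback (build paths taken from the batch variables above):

```python
# Sketch of "make.bat html" under the sphinx_python fallback; assumes Sphinx
# is importable. Directories mirror BUILDDIR and ALLSPHINXOPTS above.
import subprocess

subprocess.check_call([
    'python', '-m', 'sphinx.__init__',   # the fallback SPHINXBUILD value
    '-b', 'html',                        # HTML builder
    '-d', '_build/doctrees',             # doctree cache
    '.', '_build/html'])                 # source dir -> output dir
```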
diff --git a/gossip/ECDSA/ECDSARecover.cc b/gossip/ECDSA/ECDSARecover.cc
new file mode 100644
index 0000000000..a76f1a35ab
--- /dev/null
+++ b/gossip/ECDSA/ECDSARecover.cc
@@ -0,0 +1,331 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+
+/*
+* @file ECDSARecover.cc
+* @author Dan Middleton
+* @date 2016-01-28
+* @status RESEARCH PROTOTYPE
+*
+* Recover public key from ECDSA Signature and associated message hash
+* Given an ECDSA Signature: (r,s) and message hash, e
+* Return public key, Q, as Q = r^-1(sR - eG),
+* where R is the point with x-coordinate r and G is the group generator.
+* Specifically written for secp256k1 curve with sha256. Should not be used with other curves or hash functions.
+*/
+
+#include "ECDSARecover.h"
+#include <iostream>    // assumed std headers for the stream,
+#include <sstream>     // formatting, and exception usage below
+#include <iomanip>
+#include <stdexcept>
+
+using namespace CryptoPP;
+using namespace std;
+
+/* Recovers the public key encoded in an ECDSA signature
+ * @param msgHash: message hash;
+ * @param r,s: signature pair
+ * @param yBit: y recovery value as defined in Certicom Sec 1 v2.
+ * @return Returns point Q (public key) as a serialized x,y pair.
+ */
+string recoverPubKeyFromSig(Integer e, Integer r, Integer s, int yBit) {
+#ifdef DEBUG_PUBKRECOVER
+ cout << endl << "Enter recoverPubKeyFromSig(...)" << endl;
+#endif
+ ECDSA<ECP, SHA256>::PrivateKey tmp;
+ tmp.Initialize(ASN1::secp256k1(), 2); //use private key constructor to get the curve params
+
+ //Setup variables
+ Integer h(tmp.GetGroupParameters().GetCofactor());
+ Integer a(tmp.GetGroupParameters().GetCurve().GetA());
+ Integer b(tmp.GetGroupParameters().GetCurve().GetB());
+ Integer p(tmp.GetGroupParameters().GetCurve().GetField().GetModulus()); //Field modulus
+ Integer n(tmp.GetGroupParameters().GetSubgroupOrder()); //Curve modulus. #E(p)=n < #Fp=p
+ ECPPoint G(tmp.GetGroupParameters().GetSubgroupGenerator()); //Curve generator
+ ECPPoint R(1,1); //Point to be recovered from signature; initialized off curve for safety.
+ Integer x(0L), y(0L), exp(0L); //x, y, and exponentiation term used for recovering point R.
+
+ ECP curve(p, a, b); //manually specify params for secp256k1 extracted from ECDSA class above.
+
+ if (r > n || r < 0) { //Check inputs.
+ string error = "Invalid signature. r exceeds group size.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+ if (s > n || s < 0) {
+ string error = "Invalid signature. s exceeds group size.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+ if (e.BitCount() > 256 || e < 0) { //e may be larger than n, but may not exceed sha256 bit length.
+ string error = "Invalid signature. Message hash value out of range.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+
+ //Use r (the x coordinate of R=kG) to compute y
+ for (int i = 0; i < (h + 1); i++) { //Iterate over the cofactor to try multiple possible x deriving from r.
+ x = r + i*n; //x may lie between n and p, having been reduced when r was set to x mod n.
+ if (x>p) {
+ string error = "Invalid signature. Recovered R.x exceeds field modulus.\n";
+ throw std::domain_error(error);
+ return ""; //x could never have been larger than the field modulus, p.
+ }
+
+ y = (x * x * x + 7) % p; //computes y^2 hardcoded to secp256k params a=0, b=7;
+ exp = (p + 1) / 4; //Exponentiation rule for finding sqrt when p = 3 mod 4 (see HAC 3.36)...
+ y = a_exp_b_mod_c(y, exp, p); //...find sqrt of y^2
+
+ if ((yBit % 2) ^ (y % 2)) { //yBit indicates if we expect y to be odd. If there's a mismatch then we need the other y.
+ y = p - y; //sqrt(y^2) = {y,-y} if yBit trips then must select -y
+ }
+
+ R.x = x; R.y = y;
+ if (curve.VerifyPoint(R)) { //Check if this point is on the curve.
+ break; //If so jump out of the cofactor loop
+ } //If not maybe we have another loop iteration to find it.
+
+ }
+
+
+ if(!curve.VerifyPoint(R)){ //Validate final computed point is on the curve
+ string error = "Recover Pub Key from Sig: Computed point is not on curve.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+
+ //Compute Q=r^-1(sR-eG) mod p
+ ECPPoint sR(curve.Multiply(s, R)); //compute s*R
+ ECPPoint eG(curve.Multiply(e, G)); //compute e*G
+ ECPPoint sR_eG(curve.Subtract(sR, eG));//compute sR-eG
+ Integer rInv = r.InverseMod(n); //Compute modular inverse of r
+ ECPPoint Q(curve.Multiply(rInv, sR_eG));//Apply r_inverse to sR-eG
+
+ /*
+ * Check that Q actually validates the message. For optimization this can probably be removed.
+ * Crypto++ takes the message not a digest as input. We only have access to the digest.
+ * i.e.: verifier.VerifyMessage((const byte*)message.data(), message.size(), (const byte*)signature.data(), signature.size());
+ * Instead do signature verification from scratch.
+ */
+ //If Q or QP is the identity or if it isn't on the curve then fail
+ if ((Q == curve.Identity()) || (curve.Multiply(p, Q) == curve.Identity()) || (!curve.VerifyPoint(Q))) {
+ string error = "Recover Pub Key from Sig: Calculated Q fails basic criteria.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+
+ //Compute ewG + rwQ; x component of sum should equal r for sig to verify
+ Integer w(s.InverseMod(n)); //Calculate s^-1
+ Integer u1(a_times_b_mod_c(e, w, n)); // u1 = ew mod n
+ Integer u2(a_times_b_mod_c(r, w, n)); // u2 = rw mod n
+ ECPPoint u1G(curve.Multiply(u1, G)); // u1*G
+ ECPPoint u2Q(curve.Multiply(u2, Q)); // u2*Q
+ ECPPoint X1(curve.Add(u1G, u2Q)); // u1G + u2Q;
+ if (!curve.VerifyPoint(X1)) {
+ string error = "x1 did not verify as a point on the curve.\n";
+ throw std::domain_error(error);
+ return "";
+ }
+
+ Integer x1 = X1.x % n; // take x coordinate mod n
+ if (r != x1) { // if r == x1 then signature verifies
+ string error = "Failed to recover pubkey from signature. Recovered key fails to verify signature\n";
+ throw std::domain_error(error);
+ return "";
+ }
+
+#ifdef DEBUG_PUBKRECOVER
+ cout << "Success recovering a pubkey from signature.\n";
+ cout << "Computed R..." << endl;
+ cout << " R.x: " << R.x << endl;
+ cout << " R.y: " << R.y << endl;
+ cout << "Computed Q..." << endl;
+ cout << " Q.x: " << Q.x << endl;
+ cout << " Q.y: " << Q.y << endl;
+ cout << "Q hex... " << endl;
+ cout << " Q.x: " << std::hex << Q.x << endl;
+ cout << " Q.y: " << Q.y << endl << std::dec;
+ cout << "Input r: " << r << endl;
+ cout << "Computed x1: " << x1 << endl;
+#endif
+
+ std::stringstream xss, yss, stream;
+ xss << std::hex << Q.x; //Get hex strings of points
+ yss << std::hex << Q.y;
+ string xstr = xss.str(); xstr.resize(xstr.size()-1); // xstr.pop_back(); //Strip off cryptopp's hex "h" tag.
+ string ystr = yss.str(); ystr.resize(ystr.size()-1); // ystr.pop_back();
+ stream << std::setw(64) << std::setfill('0') << xstr; //Pad out 64 nibbles
+ stream << std::setw(64) << std::setfill('0') << ystr; //Pad out 64 nibbles
+ return stream.str();
+}
+
+// TEST method
+// Expects signature computed from the following
+// d:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7aeh
+// k:48692452077975311141641379449682050563269990734773417387024709146437866544976 (note: dec)
+// e:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9h
+// Should have created an r,s:
+// r:73822833206246044331228008262087004113076292229679808334250850393445001014761
+// s:58995174607243353628346858794753620798088291196940745194581481841927132845752
+void test(Integer e, Integer r, Integer s){
+ ECDSA<ECP, SHA256>::PrivateKey tmp;
+ tmp.Initialize(ASN1::secp256k1(), 2); //use private key constructor to get the curve params
+
+ //Setup variables
+ Integer h(tmp.GetGroupParameters().GetCofactor());
+ Integer a(tmp.GetGroupParameters().GetCurve().GetA());
+ Integer b(tmp.GetGroupParameters().GetCurve().GetB());
+ Integer p(tmp.GetGroupParameters().GetCurve().GetField().GetModulus()); //Field modulus
+ Integer n(tmp.GetGroupParameters().GetSubgroupOrder()); //Curve modulus. #E(p)=n < #Fp=p
+ ECPPoint G(tmp.GetGroupParameters().GetSubgroupGenerator()); //Curve generator
+ ECP curve(p, a, b); //manually specify params for secp256k extracted from ECDSA class above.
+ Integer d("2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7aeh");
+
+ //derive k
+ Integer k("48692452077975311141641379449682050563269990734773417387024709146437866544976"); //yanked from python
+ Integer w = s.InverseMod(n);
+ cout << "TEST: Expected k: " << k << endl;
+ ECPPoint RPrime(curve.Multiply(k,G));
+ Integer rx = RPrime.x %n;
+ Integer ry = RPrime.y %n;
+ cout << "TEST: R computed from k\n";
+ cout << "TEST: kG.x mod n: " << rx << endl;
+ cout << "TEST: kG.y mod n: " << ry << endl;
+ k = 0;
+ cout << "TEST: Cleared k: " << k << endl;
+ k = (e + r*d) %n;
+ k = w * k %n;
+ ECPPoint R(curve.Multiply(k, G));
+ if(r == R.x) {
+ cout << "TEST: k verified by r==R.x\n" << "TEST: k: " << k << endl;
+ } else {
+ cerr << "TEST: k computation FAILED\n" << "TEST: k: " << k << endl;
+ }
+ cout << "TEST: computed R.x: " << R.x << endl;
+
+ //Derive e = sk - rd
+ Integer u = s * k % n;
+ Integer v = r * d % n;
+ v = n - v;
+ Integer derived_e = (u + v) % n; //parenthesized: % binds tighter than +
+ if(e == derived_e) {
+ cout << "TEST: e verified by sk-rd\n" << "TEST: e': " << derived_e << endl;
+ } else {
+ cerr << "TEST: e compuation FAILED\n" << "TEST: e': " << derived_e << endl;
+ }
+}
+
+string recoverPubKeyFromSig(string msgHash, string sig_r, string sig_s, int yBit) {
+ if (msgHash.empty() || sig_r.empty() || sig_s.empty() || yBit > 3 || yBit < 0)
+ throw std::invalid_argument("Empty string or invalid yBit value.\n");
+ try {
+ Integer e(msgHash.data());
+ Integer r(sig_r.data());
+ Integer s(sig_s.data());
+#ifdef DEBUG_PUBKRECOVER
+ cout << "In c++ code" << endl;
+ cout << "e: " << e << endl;
+ cout << "hex(e): " << std::hex << e << endl;
+ cout << "r: " << std::dec << r << endl;
+ cout << "s: " << s << endl;
+ cout << "ybit: " << yBit << endl;
+#endif
+#ifdef TEST_PUBKRECOVER
+ test(e, r, s);
+#endif
+ return recoverPubKeyFromSig(e, r, s, yBit);
+ }
+ catch (std::domain_error &) { //rethrow by reference; catching by value
+ throw;                        //would slice the exception type
+ }
+ catch (exception &) {
+ throw;
+ }
+}
+
+string recoverPubKeyFromSig_Base32(string msgHash, string sig_r, string sig_s, int yBit) {
+ Integer e, r, s;
+ byte tmp[32];
+ word64 size;
+
+ Base32Decoder decoderA;
+
+ decoderA.Put((byte*)msgHash.data(), msgHash.size());
+ decoderA.MessageEnd();
+ size = decoderA.MaxRetrievable();
+
+ if (size && size <= SIZE_MAX)
+ {
+ decoderA.Get(tmp, 32);
+ e.Decode(tmp, 32);
+ cout << "decoded e: " << e << endl;
+ }
+ else {
+ string error = "Invalid sized msg hash to recoverPubkeyFromSig\n";
+ throw std::invalid_argument(error);
+ return "";
+ }
+ //decoder.Initialize();
+ Base32Decoder decoderB;
+ decoderB.Put((byte*)sig_r.data(), sig_r.size());
+ decoderB.MessageEnd();
+ size = decoderB.MaxRetrievable();
+ if (size && size <= SIZE_MAX)
+ {
+ decoderB.Get(tmp, 32);
+ r.Decode(tmp, 32);
+ cout << "decoded r: " << r << endl;
+ }
+ else {
+ string error = "Invalid sized sig_r to recoverPubkeyFromSig\n";
+ throw std::invalid_argument(error);
+ return "";
+ }
+ //decoder.Initialize();
+ Base32Decoder decoderC;
+ decoderC.Put((byte*)sig_s.data(), sig_s.size());
+ decoderC.MessageEnd();
+ size = decoderC.MaxRetrievable();
+ if (size && size <= SIZE_MAX)
+ {
+ decoderC.Get(tmp, 32);
+ s.Decode(tmp, 32);
+ cout << "decoded s: " << s << endl;
+ }
+ else {
+ string error = "Invalid sized sig_s to recoverPubkeyFromSig\n";
+ throw std::invalid_argument(error);
+ return "";
+ }
+//TODO: Pulled the base32 return format out of the main recovery function. Need to do that here. Something like this:
+/* byte buffer[64];
+ Q.x.Encode(&buffer[0], 32);
+ Q.y.Encode(&buffer[32], 32);
+
+ Base32Encoder encoder(NULL, false);
+ encoder.Put(buffer, 64);
+ encoder.MessageEnd();
+
+ string encoded;
+ encoded.resize(encoder.MaxRetrievable());
+ encoder.Get((byte *)encoded.data(), encoded.size());
+*/
+ return recoverPubKeyFromSig(e, r, s, yBit);
+
+}
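
The recovery math above is easier to follow stripped of the Crypto++ plumbing. Below is a minimal pure-Python sketch of the same computation over secp256k1 (the curve constants are the standard published domain parameters; this is an illustration, not part of the change set). Because secp256k1 has cofactor h = 1, the r + i*n candidate loop collapses to the single case x = r, and the input validation is omitted:

```python
# Pure-Python sketch of recoverPubKeyFromSig's math (illustration only).
p = 2**256 - 2**32 - 977                      # field modulus
n = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
G = (0x79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798,
     0x483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8)

def inv(a, m):
    return pow(a, m - 2, m)                   # modular inverse (m is prime)

def ec_add(P, Q):
    if P is None: return Q                    # None is the identity element
    if Q is None: return P
    if P[0] == Q[0] and (P[1] + Q[1]) % p == 0:
        return None                           # P + (-P) = identity
    if P == Q:
        lam = 3 * P[0] * P[0] * inv(2 * P[1], p) % p
    else:
        lam = (Q[1] - P[1]) * inv(Q[0] - P[0], p) % p
    x = (lam * lam - P[0] - Q[0]) % p
    return (x, (lam * (P[0] - x) - P[1]) % p)

def ec_mul(k, P):                             # double-and-add scalar multiply
    R = None
    while k:
        if k & 1:
            R = ec_add(R, P)
        P = ec_add(P, P)
        k >>= 1
    return R

def recover(e, r, s, ybit):
    # Rebuild R from r: y^2 = x^3 + 7; sqrt via exponent (p+1)/4 (p = 3 mod 4)
    y = pow((r * r * r + 7) % p, (p + 1) // 4, p)
    if (y & 1) != (ybit & 1):                 # pick the root matching the parity bit
        y = p - y
    R = (r, y)
    sR_eG = ec_add(ec_mul(s, R), ec_mul((-e) % n, G))   # sR - eG
    return ec_mul(inv(r, n), sR_eG)                     # Q = r^-1 (sR - eG)
```

With a matching yBit, `recover(e, r, s, yBit)` yields the point Q that the C++ then serializes as two zero-padded 64-nibble hex strings.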
diff --git a/gossip/ECDSA/ECDSARecover.h b/gossip/ECDSA/ECDSARecover.h
new file mode 100644
index 0000000000..13c85f93b6
--- /dev/null
+++ b/gossip/ECDSA/ECDSARecover.h
@@ -0,0 +1,50 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+
+/*
+*
+* @file ECDSARecover.h
+* @author Dan Middleton
+* @date 2016-01-28
+* @status RESEARCH PROTOTYPE
+*
+* Recover public key from ECDSA Signature
+* Given an ECDSA Signature: (r,s) and message hash, e
+* Return public key, Q, as Q = r^-1(sR - eG),
+* where R is the point with x-coordinate r and G is the group generator.
+* Specifically written for secp256k1 curve. Should not be used with other curves.
+*/
+
+#include <cryptopp/eccrypto.h>   // Crypto++ header paths assumed for the
+#include <cryptopp/ecp.h>        // curve, hash, and codec types used here
+#include <cryptopp/oids.h>
+#include <cryptopp/sha.h>
+#include <cryptopp/nbtheory.h>
+#include <cryptopp/base32.h>
+#include <string>
+
+
+/* Recovers the public key encoded in an ECDSA signature
+ * @param msgHash: message hash;
+ * @param r,s: signature pair
+ * @param yBit: y recovery value as defined in Certicom Sec 1 v2.
+ * @return Returns point Q (public key) as a serialized x,y pair.
+*/
+std::string recoverPubKeyFromSig(std::string msgHash, std::string sig_r, std::string sig_s, int yBit);
+
+//Internally it calls a big integer version. This header is intended for swig
+//so we don't expose that method here.
+//string recoverPubKeyFromSig(Integer msgHash, Integer sig_r, Integer sig_s, int yBit);
diff --git a/gossip/ECDSA/ECDSARecoverModule.i b/gossip/ECDSA/ECDSARecoverModule.i
new file mode 100644
index 0000000000..c985c00cf6
--- /dev/null
+++ b/gossip/ECDSA/ECDSARecoverModule.i
@@ -0,0 +1,25 @@
+%module ECDSARecoverModule
+
+%include "std_string.i"
+
+%{
+#include "ECDSARecover.h"
+%}
+
+using namespace std;
+
+%exception recoverPubKeyFromSig {
+ try {
+ $action
+ } catch (std::invalid_argument &e) {
+ PyErr_SetString(PyExc_ValueError, const_cast<char *>(e.what()));
+ return NULL;
+ } catch (std::domain_error &e) {
+ PyErr_SetString(PyExc_ValueError, const_cast<char *>(e.what()));
+ return NULL;
+ } catch (std::exception &e) {
+ PyErr_SetString(PyExc_RuntimeError, const_cast<char *>(e.what()));
+ return NULL;
+ }
+}
+string recoverPubKeyFromSig(string msghash, string sig_r, string sig_s, int yBit);
\ No newline at end of file
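
From Python, the %exception block above surfaces the C++ domain_error and invalid_argument as ValueError (and anything else as RuntimeError). A hypothetical call through the generated wrapper, assuming the generated ECDSARecoverModule.py lands in gossip/ECDSA as the package layout below suggests:

```python
# Hypothetical wrapper usage; the import path is an assumption.
from gossip.ECDSA import ECDSARecoverModule

def recover_pubkey(msghash, sig_r, sig_s, ybit):
    try:
        # inputs are decimal strings, per the Integer(...) parsing in the C++
        return ECDSARecoverModule.recoverPubKeyFromSig(msghash, sig_r,
                                                       sig_s, ybit)
    except ValueError as err:   # domain_error / invalid_argument map here
        print 'public key recovery failed:', err
        return None
```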
diff --git a/gossip/ECDSA/__init__.py b/gossip/ECDSA/__init__.py
new file mode 100644
index 0000000000..0e24362ab1
--- /dev/null
+++ b/gossip/ECDSA/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
diff --git a/gossip/__init__.py b/gossip/__init__.py
new file mode 100644
index 0000000000..b46a3d4ebf
--- /dev/null
+++ b/gossip/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['common', 'config', 'event_handler', 'gossip_core', 'message',
+ 'node', 'signed_object', 'stats', 'token_bucket']
diff --git a/gossip/common.py b/gossip/common.py
new file mode 100644
index 0000000000..13e1b082f9
--- /dev/null
+++ b/gossip/common.py
@@ -0,0 +1,123 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+The Common module defines utility methods used across gossip.
+"""
+
+import json
+import logging
+from collections import OrderedDict
+
+import cbor
+
+logger = logging.getLogger(__name__)
+
+NullIdentifier = '0' * 16
+
+
+def pretty_print_dict(input):
+ """Generates a pretty-print formatted version of the input JSON.
+
+ Args:
+ input (str): the JSON string to format.
+
+ Returns:
+ str: pretty-print formatted string.
+ """
+ return json.dumps(_ascii_encode_dict(input), indent=2, sort_keys=True)
+
+
+def json2dict(input):
+ """Deserializes JSON into a dictionary.
+
+ Args:
+ input (str): the JSON string to deserialize.
+
+ Returns:
+ dict: a dictionary object reflecting the structure of the JSON.
+ """
+ return _ascii_encode_dict(json.loads(input))
+
+
+def dict2json(input):
+ """Serializes a dictionary into JSON.
+
+ Args:
+ input (dict): a dictionary object to serialize into JSON.
+
+ Returns:
+ str: a JSON string reflecting the structure of the input dict.
+ """
+ return json.dumps(_ascii_encode_dict(input))
+
+
+def cbor2dict(input):
+ """Deserializes CBOR into a dictionary.
+
+ Args:
+ input (bytes): the CBOR object to deserialize.
+
+ Returns:
+ dict: a dictionary object reflecting the structure of the CBOR.
+ """
+
+ return _ascii_encode_dict(cbor.loads(input))
+
+
+def dict2cbor(input):
+ """Serializes a dictionary into CBOR.
+
+ Args:
+ input (dict): a dictionary object to serialize into CBOR.
+
+ Returns:
+ bytes: a CBOR object reflecting the structure of the input dict.
+ """
+
+ return cbor.dumps(_unicode_encode_dict(input), sort_keys=True)
+
+
+def _ascii_encode_dict(input):
+ """
+ Support method to ensure that JSON is converted to ascii since unicode
+ identifiers, in particular, can cause problems
+ """
+ if isinstance(input, dict):
+ return OrderedDict(
+ (_ascii_encode_dict(key), _ascii_encode_dict(input[key]))
+ for key in sorted(input.keys()))
+ elif isinstance(input, list):
+ return [_ascii_encode_dict(element) for element in input]
+ elif isinstance(input, unicode):
+ return input.encode('ascii')
+ else:
+ return input
+
+
+def _unicode_encode_dict(input):
+ """
+ Support method to ensure that JSON is converted to ascii since unicode
+ identifiers, in particular, can cause problems
+ """
+ if isinstance(input, dict):
+ return OrderedDict(
+ (_unicode_encode_dict(key), _unicode_encode_dict(input[key]))
+ for key in sorted(input.keys()))
+ elif isinstance(input, list):
+ return [_unicode_encode_dict(element) for element in input]
+ elif isinstance(input, str):
+ return unicode(input)
+ else:
+ return input
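
Because both serializers sort keys and both loaders re-normalize string types, serialization is deterministic and round-trips cleanly. A small sketch (Python 2, matching the module; the message contents are invented):

```python
from gossip.common import cbor2dict, dict2cbor, dict2json, json2dict

msg = {'__TYPE__': '/gossip/KeepAlive', 'Nonce': 42}   # contents invented

assert json2dict(dict2json(msg)) == msg                # JSON round trip
assert cbor2dict(dict2cbor(msg)) == msg                # CBOR round trip
# sorted keys make the CBOR encoding deterministic, byte for byte
assert dict2cbor(msg) == dict2cbor(cbor2dict(dict2cbor(msg)))
```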
diff --git a/gossip/config.py b/gossip/config.py
new file mode 100644
index 0000000000..e40ba0ef39
--- /dev/null
+++ b/gossip/config.py
@@ -0,0 +1,217 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import os
+import re
+
+from gossip.common import json2dict
+
+
+class ConfigFileNotFound(Exception):
+ """Exception thrown when config files are expected but not found."""
+
+ def __init__(self, config_files, search_path):
+ self.config_files = config_files
+ self.search_path = search_path
+
+ def __str__(self):
+ return ("Unable to locate the following configuration files: " +
+ "{0} (search path: {1})".format(", ".join(self.config_files),
+ ", ".join(self.search_path)))
+
+
+class InvalidSubstitutionKey(Exception):
+ """Exception raised when a config uses invalid substitution key."""
+
+ def __init__(self, key, config_key, config_value, source):
+ self.key = key
+ self.config_key = config_key
+ self.config_value = config_value
+ self.source = source
+
+ def __str__(self):
+ text = ("invalid substitution key of " + self.key + " for " +
+ self.config_key + " with value '" + self.config_value + "'")
+ if self.source is not None:
+ text = text + " in " + self.source
+ return text
+
+
+class Config(dict):
+ """Configuration base class."""
+
+ def __init__(self, name="config", cfg={}, source=None, **kwargs):
+ super(Config, self).__init__(**kwargs)
+ self.name = name
+ self.update(cfg)
+ self._source = source
+
+ # The maximum number of times substitutions should be performed
+ # during resolve().
+ self.substitution_max_iterations = 10
+
+ def get_source(self, key):
+ """Returns a the source of the key."""
+
+ return self._source
+
+ def resolve(self, substitutions):
+ """Performs path substitutions, as provided, and then returns
+ a dict of key/value pairs.
+
+ Keyword arguments:
+ substitutions -- a dict where the key is the variable to be
+ substituted and the value is the config key to use to
+ lookup the value
+ """
+
+ pathsubs = {}
+ for key, value in substitutions.iteritems():
+ if value in self:
+ pathsubs[key] = self[value]
+
+ cfg = {}
+ for key, value in self.iteritems():
+ if isinstance(value, str):
+ for i in xrange(self.substitution_max_iterations):
+ try:
+ new_value = value.format(**pathsubs)
+ except KeyError, e:
+ raise InvalidSubstitutionKey(
+ str(e), key, value, self.get_source(key))
+ if new_value == value:
+ break
+ value = new_value
+ cfg[key] = value
+
+ return cfg
+
+
+class EnvConfig(Config):
+ """Configuration based on environment variables."""
+
+ def __init__(self, env_to_config_list):
+ super(EnvConfig, self).__init__(name="env")
+
+ self._source_data = {}
+
+ for (env_key, config_key) in env_to_config_list:
+ if env_key in os.environ:
+ self[config_key] = os.environ[env_key]
+ self._source_data[config_key] = env_key
+
+ def get_source(self, key):
+ return self.name + ":" + self._source_data[key]
+
+
+class ArgparseOptionsConfig(Config):
+ """Configuration based on argparse options."""
+
+ def __init__(self, option_to_config_list, options):
+ super(ArgparseOptionsConfig, self).__init__(name="cli")
+
+ options_dict = vars(options)
+
+ for (option_key, config_key) in option_to_config_list:
+ if (option_key in options_dict.keys()
+ and options_dict[option_key] is not None):
+ self[config_key] = options_dict[option_key]
+
+
+class JsonConfig(Config):
+ """Loads configuration from a JSON file given the file content."""
+
+ def __init__(self, lines, filename=None):
+ super(JsonConfig, self).__init__()
+
+ if filename is not None:
+ self.name = "json:" + filename
+ else:
+ self.name = "json"
+
+ self._parse(lines)
+
+ def _parse(self, lines):
+ cpattern = re.compile('##.*$')
+
+ text = ""
+ for line in lines:
+ text += re.sub(cpattern, '', line) + ' '
+
+ json_dict = json2dict(text)
+
+ self.update(json_dict)
+
+
+class JsonFileConfig(Config):
+ """Loads configuration from a JSON file given a filename."""
+
+ def __init__(self, filename):
+ super(JsonFileConfig, self).__init__(name="file:" + filename)
+
+ with open(filename) as fd:
+ lines = fd.readlines()
+ cfg = JsonConfig(lines, filename)
+ self.update(cfg)
+
+
+class AggregateConfig(Config):
+ """Aggregates multiple Configs by applying them in order."""
+
+ def __init__(self, configs):
+ super(AggregateConfig, self).__init__(name="aggregate")
+
+ self._source_data = {}
+
+ for config in configs:
+ for key, value in config.iteritems():
+ self[key] = value
+ self._source_data[key] = config.get_source(key)
+
+ def get_source(self, key):
+ return self._source_data[key]
+
+
+def load_config_files(config_files, search_path, config_files_required=True):
+ """Loads a set of config files from a search path.
+
+ Keyword arguments:
+ config_files -- a list of config filenames
+ search_path -- a list of directories to search
+ config_files_required -- if True, ConfigFileNotFound is thrown if
+ the configuration files cannot be located
+ """
+ files_not_found = []
+ files_found = []
+
+ for cfile in config_files:
+ filename = None
+ for directory in search_path:
+ if os.path.isfile(os.path.join(directory, cfile)):
+ filename = os.path.join(directory, cfile)
+ break
+
+ if filename is None:
+ files_not_found.append(cfile)
+ else:
+ files_found.append(filename)
+
+ if config_files_required and len(files_not_found) > 0:
+ raise ConfigFileNotFound(files_not_found, search_path)
+
+ config_list = []
+ for filename in files_found:
+ config_list.append(JsonFileConfig(filename))
+ return config_list
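
Config.resolve() applies str.format repeatedly (up to substitution_max_iterations) until a value stops changing, so substituted values may themselves contain substitution keys. An illustrative use, with invented keys and paths:

```python
from gossip.config import Config

# keys and paths below are invented for illustration
cfg = Config(cfg={'DataDirectory': '/var/lib/ledger',
                  'KeyFile': '{data}/keys/node.wif'})
resolved = cfg.resolve({'data': 'DataDirectory'})
assert resolved['KeyFile'] == '/var/lib/ledger/keys/node.wif'
```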
diff --git a/gossip/event_handler.py b/gossip/event_handler.py
new file mode 100644
index 0000000000..2c9b1a20a4
--- /dev/null
+++ b/gossip/event_handler.py
@@ -0,0 +1,75 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+"""
+This module defines the EventHandler class which allows for the
+registration, removal, and invocation of event callbacks.
+"""
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+
+class EventHandler(object):
+ """Handles the registration, removal, and invocation of event callbacks.
+
+ Attributes:
+ EventName (str): The name of the event handler.
+ """
+
+ def __init__(self, evname):
+ """Constructor for the EventHandler class.
+
+ Args:
+ evname (str): The name of the event handler.
+ """
+ self.EventName = evname
+ self._handlers = []
+
+ def __iadd__(self, handler):
+ self._handlers.append(handler)
+ return self
+
+ def __isub__(self, handler):
+ self._handlers.remove(handler)
+ return self
+
+ def __call__(self, *args, **keywargs):
+ try:
+ # This calls all of the handlers, but will only return true if they
+ # ALL return true.
+ result = True
+ for handler in self._handlers:
+ if handler(*args, **keywargs) is not True:
+ result = False
+ return result
+ except:
+ logger.exception('event handler %s failed', self.EventName)
+
+ def fire(self, *args, **keywargs):
+ """Execute all of the registered callbacks.
+
+ Args:
+ args (list): An unpacked list of arguments to pass to the
+ callback.
+ keywargs (list): An unpacked dict of arguments to pass to the
+ callback.
+
+ Returns:
+ bool: True if ALL of the handlers return True. Otherwise
+ returns False.
+ """
+ return self.__call__(*args, **keywargs)
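
EventHandler registers callbacks with += and removes them with -=, and fire() reports True only when every handler returns True; gossip_core.py below wires its heartbeat timer exactly this way. A minimal sketch:

```python
import time

from gossip.event_handler import EventHandler

def log_tick(now):          # a handler signals success by returning True
    return True

on_heartbeat = EventHandler('onHeartbeatTimer')
on_heartbeat += log_tick    # register via __iadd__
assert on_heartbeat.fire(time.time()) is True
on_heartbeat -= log_tick    # remove via __isub__
```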
diff --git a/gossip/gossip_core.py b/gossip/gossip_core.py
new file mode 100644
index 0000000000..3d4c2ff6b3
--- /dev/null
+++ b/gossip/gossip_core.py
@@ -0,0 +1,754 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module defines the core gossip class for communication between nodes.
+"""
+
+import Queue
+import errno
+import logging
+import socket
+import sys
+import time
+
+from twisted.internet import reactor, task
+from twisted.internet.protocol import DatagramProtocol
+
+from gossip import event_handler
+from gossip import message
+from gossip import stats
+from gossip.messages import connect_message
+from gossip.messages import gossip_debug
+from gossip.messages import random_walk_message
+from gossip.messages import shutdown_message
+from gossip.messages import topology_message
+
+logger = logging.getLogger(__name__)
+
+
+class Gossip(object, DatagramProtocol):
+ """Defines the protocol for gossip communcation between nodes.
+
+ Attributes:
+ ExpireMessageTime (int): Time in seconds to hold message to test
+ for duplicates.
+ MaximumPacketSize (int): The maximum size of a packet.
+ CleanupInterval (float): The number of seconds between cleanups.
+ KeepAliveInterval (float): The number of seconds between keep
+ alives.
+ MinimumRetries (int): The minimum number of retries on message
+ retransmission.
+ RetryInterval (float): The time between retries, in seconds.
+ LocalNode (Node): The local node sending and receiving messages.
+ NodeMap (dict): A map of peer nodes the local node is communicating
+ with.
+ PendingAckMap (dict): A map of incoming messages that require
+ acknowledgement.
+ MessageHandledMap (dict): A map of handled messages where keys are
+ message identifiers and values are message expiration times.
+ MessageHandlerMap (dict): A map of message types to handler
+ functions.
+ SequenceNumber (int): The next sequence number to be used for
+ messages from the local node.
+ NextCleanup (float): The time of the next cleanup event.
+ NextKeepAlive (float): The time of the next keepalive event.
+ onNodeDisconnect (EventHandler): An EventHandler for functions
+ to call when a node becomes disconnected.
+ onHeartbeatTimer (EventHandler): An EventHandler for functions
+ to call when the heartbeat timer fires.
+ MessageQueue (Queue): The queue of incoming messages.
+ ProcessIncomingMessages (bool): Whether or not to process incoming
+ messages.
+ Listener (Reactor.listenUDP): The UDP listener.
+ """
+
+ # time in seconds to hold message to test for duplicates
+ ExpireMessageTime = 300
+ MaximumPacketSize = 8192 * 6 - 128
+ CleanupInterval = 1.00
+ KeepAliveInterval = 10.0
+
+ def __init__(self, node, **kwargs):
+ """Constructor for the Gossip class.
+
+ Args:
+ node (Node): The local node.
+ MinimumRetries (int): The minimum number of retries on message
+ transmission.
+ RetryInterval (float): The time between retries, in seconds.
+ """
+ if 'MinimumRetries' in kwargs:
+ self.MinimumRetries = kwargs['MinimumRetries']
+ if 'RetryInterval' in kwargs:
+ self.RetryInterval = kwargs['RetryInterval']
+
+ self.LocalNode = node
+ self.NodeMap = {}
+
+ self.PendingAckMap = {}
+ self.MessageHandledMap = {}
+ self.MessageHandlerMap = {}
+
+ self.SequenceNumber = 0
+ self.NextCleanup = time.time() + self.CleanupInterval
+ self.NextKeepAlive = time.time() + self.KeepAliveInterval
+
+ self._initgossipstats()
+
+ connect_message.register_message_handlers(self)
+
+ gossip_debug.register_message_handlers(self)
+ shutdown_message.register_message_handlers(self)
+ topology_message.register_message_handlers(self)
+ random_walk_message.register_message_handlers(self)
+
+ # setup connectivity events
+ self.onNodeDisconnect = event_handler.EventHandler('onNodeDisconnect')
+
+ # setup the timer events
+ self.onHeartbeatTimer = event_handler.EventHandler('onHeartbeatTimer')
+ self.onHeartbeatTimer += self._timertransmit
+ self.onHeartbeatTimer += self._timercleanup
+ self.onHeartbeatTimer += self._keepalive
+
+ self._HeartbeatTimer = task.LoopingCall(self._heartbeat)
+ self._HeartbeatTimer.start(0.05)
+
+ self.MessageQueue = Queue.Queue()
+
+ try:
+ self.ProcessIncomingMessages = True
+ self.Listener = reactor.listenUDP(self.LocalNode.NetPort,
+ self,
+ interface=self.LocalNode.NetHost)
+ reactor.callInThread(self._dispatcher)
+
+ except:
+ logger.critical(
+ "failed to connect local socket, server shutting down",
+ exc_info=True)
+ sys.exit(0)
+
+ def _initgossipstats(self):
+ self.PacketStats = stats.Stats(self.LocalNode.Name, 'packet')
+ self.PacketStats.add_metric(stats.Average('BytesSent'))
+ self.PacketStats.add_metric(stats.Average('BytesReceived'))
+ self.PacketStats.add_metric(stats.Counter('MessagesAcked'))
+ self.PacketStats.add_metric(stats.Counter('DuplicatePackets'))
+ self.PacketStats.add_metric(stats.Counter('DroppedPackets'))
+ self.PacketStats.add_metric(stats.Counter('AcksReceived'))
+ self.PacketStats.add_metric(stats.Counter('MessagesHandled'))
+ self.PacketStats.add_metric(stats.Sample(
+ 'UnackedPacketCount', lambda: len(self.PendingAckMap)))
+
+ self.MessageStats = stats.Stats(self.LocalNode.Name, 'message')
+ self.MessageStats.add_metric(stats.MapCounter('MessageType'))
+
+ self.StatDomains = {
+ 'packet': self.PacketStats,
+ 'message': self.MessageStats
+ }
+
+ def peer_list(self, allflag=False, exceptions=[]):
+ """Returns a list of peer nodes.
+
+ Args:
+ allflag (bool): Whether to include all peers.
+ exceptions (list): A list of node identifiers to exclude
+ from the peer list.
+
+ Returns:
+ list: A list of Nodes considered peers.
+ """
+ peers = []
+ for peer in self.NodeMap.itervalues():
+ if allflag or peer.Enabled:
+ if peer.Identifier not in exceptions:
+ peers.append(peer)
+ return peers
+
+ def peer_id_list(self, allflag=False, exceptions=[]):
+ """Returns a list of peer node identifiers.
+
+ Args:
+ allflag (bool): Whether to include all peers.
+ exceptions (list): A list of node identifiers to exclude
+ from the peer list.
+
+ Returns:
+ list: A list of Node identifiers considered peers.
+
+ """
+ return map(lambda p: p.Identifier, self.peer_list(allflag, exceptions))
+
+ def next_sequence_number(self):
+ """Increments the sequence number and returns it.
+
+ Returns:
+ int: The current sequence number for messages.
+ """
+ self.SequenceNumber += 1
+ return self.SequenceNumber
+
+ # --------------------------------- ###
+ # DatagramProtocol Overrides ###
+ # --------------------------------- ###
+
+ def startProtocol(self):
+ """Starts the gossip protocol."""
+ endpoint = self.transport.getHost()
+ logger.info('listening on %s', endpoint)
+ self.LocalNode.NetPort = endpoint.port
+ self.transport.maxPacketSize = self.MaximumPacketSize
+
+ def stopProtocol(self):
+ """Stops the gossip protocol."""
+ pass
+
+ def datagramReceived(self, data, address):
+ """Handles a received datagram.
+
+ Find a handler for the message if one exists, and call it if
+ the message has not already been handled. Also forward to peers
+ as appropriate.
+
+ Args:
+ data (str): the text of the message
+ address (str): host:port network address of the peer
+ """
+
+ if not self.ProcessIncomingMessages:
+ return
+
+ self.PacketStats.BytesReceived.add_value(len(data))
+
+ # unpack the header
+ try:
+ packet = message.Packet()
+ packet.unpack(data)
+ except:
+ logger.exception('failed to unpack message')
+ return
+
+ # Grab peer information if it is available. Unless this is a system
+ # message, we don't process any further without a known peer.
+ srcpeer = self.NodeMap.get(packet.SenderID)
+ if srcpeer:
+ srcpeer.reset_ticks()
+
+ # Handle incoming acknowledgements first, there is no data associated
+ # with an ack
+ if packet.IsAcknowledgement:
+ if srcpeer:
+ self._handleack(packet)
+ return
+
+ # The first thing to do with the message is to send an ACK; all
+ # retransmissions will be handled by the sending node. If the
+ # IsReliable flag is set then this is not a system message and we
+ # know that the peer exists.
+ if packet.IsReliable:
+ if srcpeer:
+ self._sendack(packet, srcpeer)
+
+ # now unpack the rest of the message
+ try:
+ minfo = message.unpack_message_data(packet.Data)
+ except:
+ logger.exception('unable to decode message with length %d',
+ len(data))
+ return
+
+ # if we don't have a handler, that's OK; we just don't do anything
+ # with the message, but note the missing handler in the logs
+ typename = minfo['__TYPE__']
+ self.MessageStats.MessageType.increment(typename)
+
+ if typename not in self.MessageHandlerMap:
+ logger.info('no handler found for message type %s from %s',
+ minfo['__TYPE__'], srcpeer or packet.SenderID[:8])
+ return
+
+ try:
+ msg = self.unpack_message(typename, minfo)
+ msg.TimeToLive = packet.TimeToLive - 1
+ msg.SenderID = packet.SenderID
+ except:
+ logger.exception(
+ 'unable to deserialize message of type %s from %s', typename,
+ packet.SenderID[:8])
+ return
+
+ # if we have seen this message before then just ignore it
+ if msg.Identifier in self.MessageHandledMap:
+ logger.debug('duplicate message %s received from %s', msg,
+ packet.SenderID[:8])
+ self.PacketStats.DuplicatePackets.increment()
+
+ # if we have received a particular message from a node then we
+ # don't need to send another copy back to the node; just remove
+ # it from the queue
+ try:
+ if srcpeer:
+ srcpeer.dequeue_message(msg)
+ except:
+ pass
+
+ return
+
+ # verify the signature, this is a no-op for the gossiper, but
+ # subclasses might override the function, system messages need not have
+ # verified signatures
+ if not msg.IsSystemMessage and not msg.verify_signature():
+ logger.warn('unable to verify message %s received from %s',
+ msg.Identifier[:8], msg.OriginatorID[:8])
+ return
+
+ # Handle system messages; these do not require the existence of
+ # a peer. If the packet is marked as a system message but the
+ # message type is not, then something bad is happening.
+ self.PacketStats.MessagesHandled.increment()
+
+ if srcpeer or msg.IsSystemMessage:
+ self.handle_message(msg)
+ return
+
+ logger.warn('received message %s from an unknown peer %s', msg,
+ packet.SenderID[:8])
+
+ # --------------------------------- ###
+ # Utility functions ###
+ # --------------------------------- ###
+
+ def _heartbeat(self):
+ """Invoke functions that are connected to the heartbeat timer.
+ """
+ try:
+ now = time.time()
+ self.onHeartbeatTimer.fire(now)
+ except:
+ logger.exception('unhandled error occurred during timer processing')
+
+ def _dowrite(self, msg, peer):
+ """Put a message on the wire.
+
+ Args:
+ message (bytes): The contents of the message to send.
+ peer (Node): The node to send the message to.
+
+ Returns:
+ bool: Whether or not the attempt to send the message succeeded.
+ """
+
+ if len(msg) > self.MaximumPacketSize:
+ logger.error(
+ 'attempt to send a message beyond maximum packet size, %d',
+ len(msg))
+ return False
+
+ try:
+ sentbytes = self.transport.write(msg, peer.NetAddress)
+ except socket.error as serr:
+ if serr.errno == errno.EWOULDBLOCK:
+ logger.error('outbound queue is full, dropping message to %s',
+ peer)
+ return False
+ else:
+ logger.critical(
+ 'unknown socket error occurred while sending message '
+ 'to %s; %s',
+ peer, serr)
+ return False
+ except:
+ logger.exception('error occurred while writing to %s', peer)
+ return False
+
+ if sentbytes < len(msg):
+ logger.error('message transmission truncated at %d, expecting %d',
+ sentbytes, len(msg))
+
+ self.PacketStats.BytesSent.add_value(sentbytes)
+ return True
+
+ def _sendmsg(self, msg, destids):
+ """Handle a request to send a message.
+
+ Rather than send immediately we'll queue it up in the destination
+ node to allow for flow control.
+
+ Args:
+ msg (Message): Initialized message of type Message.Message()
+ or a subclass.
+ destids (list): List of peer identifiers (UUIDs).
+ """
+
+ now = time.time()
+
+ for dstnodeid in destids:
+ dstnode = self.NodeMap.get(dstnodeid)
+ if not dstnode:
+ logger.debug('attempt to send message to unknown node %s',
+ dstnodeid[:8])
+ continue
+
+ if dstnode.Enabled or msg.IsSystemMessage:
+ dstnode.enqueue_message(msg, now)
+
+ def _sendack(self, packet, peer):
+ """Send an acknowledgement for a reliable packet.
+
+ Args:
+ packet (Packet): Incoming packet.
+ peer (Node): Initialized peer object.
+ """
+
+ logger.debug("sending ack for %s to %s", packet, peer)
+
+ self._dowrite(packet.create_ack(self.LocalNode.Identifier).pack(),
+ peer)
+ self.PacketStats.MessagesAcked.increment()
+
+ def _handleack(self, incomingpkt):
+ """Handle an incoming acknowledgement.
+
+ Args:
+ incomingpkt (Packet): Incoming packet.
+ """
+
+ incomingnode = self.NodeMap.get(incomingpkt.SenderID,
+ incomingpkt.SenderID[:8])
+ originalpkt = self.PendingAckMap.get(incomingpkt.SequenceNumber)
+
+ # First, make sure that we have a record of this packet; it's not
+ # necessarily broken if the sequence number doesn't exist, because
+ # we may have expired the original packet if the ack took too long
+ # to get here
+ if not originalpkt:
+ logger.info('received unexpected ack for packet %s from %s',
+ incomingpkt, incomingnode)
+ return
+
+ # The source of the ack had better be the node we sent the original
+ # packet to. This could be a real problem, so mark it as a warning.
+ origdstnode = self.NodeMap[originalpkt.DestinationID]
+ if incomingpkt.SenderID != origdstnode.Identifier:
+ logger.warn('received ack for packet %s from %s instead of %s',
+ incomingpkt, incomingnode, origdstnode)
+ logger.warn('%s, %s', incomingpkt.SenderID, origdstnode.Identifier)
+ return
+
+ # Normal situation... update the RTT estimator to help with future
+ # retransmission times and remove the message from the
+ # retransmission queue
+ origdstnode.message_delivered(originalpkt.Message,
+ time.time() - originalpkt.TransmitTime)
+
+ self.PacketStats.AcksReceived.increment()
+ del self.PendingAckMap[incomingpkt.SequenceNumber]
+
+ def _timertransmit(self, now):
+ """A periodic handler that iterates through the nodes and sends
+ any packets that are queued for delivery.
+
+ Args:
+ now (float): Current time.
+ """
+ srcnode = self.LocalNode
+
+ dstnodes = self.peer_list(True)
+ while len(dstnodes) > 0:
+ newnodes = []
+ for dstnode in dstnodes:
+ msg = dstnode.get_next_message(now)
+ if msg:
+ if dstnode.Enabled or msg.IsSystemMessage:
+ # we loop through the nodes, and as long as there are
+ # messages pending we come back around and try again
+ newnodes.append(dstnode)
+
+ packet = message.Packet()
+ packet.add_message(msg, srcnode, dstnode,
+ self.next_sequence_number())
+ packet.TransmitTime = now
+
+ if packet.IsReliable:
+ self.PendingAckMap[packet.SequenceNumber] = packet
+
+ self._dowrite(packet.pack(), dstnode)
+
+ dstnodes = newnodes
+
+ def _timercleanup(self, now):
+ """A periodic handler that performs a variety of cleanup operations
+ including checks for dropped packets.
+
+ Args:
+ now (float): Current time.
+ """
+ if now < self.NextCleanup:
+ return
+
+ logger.debug("clean up processing %d unacked packets",
+ len(self.PendingAckMap))
+
+ self.NextCleanup = now + self.CleanupInterval
+
+ # Process packet retransmission
+ deleteq = []
+ for seqno, packet in self.PendingAckMap.iteritems():
+ if (packet.TransmitTime + packet.RoundTripEstimate) < now:
+ deleteq.append((seqno, packet))
+
+ for (seqno, packet) in deleteq:
+ logger.debug('packet %d has been marked as dropped', seqno)
+
+ self.PacketStats.DroppedPackets.increment()
+
+ # inform the node that we are treating the packet as though
+ # it has been dropped, the node may have already closed the
+ # connection so check that here
+ if packet.DestinationID in self.NodeMap:
+ dstnode = self.NodeMap[packet.DestinationID]
+ dstnode.message_dropped(packet.Message, now)
+
+ # and remove it from our saved queue
+ del self.PendingAckMap[seqno]
+
+ # Clean up information about old messages handled, this is a hack to
+ # reduce memory utilization and does create an opportunity for spurious
+ # duplicate messages to be processed. Should be fixed for production
+ # use
+ deleteq = []
+ for msgid in self.MessageHandledMap.keys():
+ exptime = self.MessageHandledMap[msgid]
+ if exptime < now:
+ deleteq.append(msgid)
+
+ for msgid in deleteq:
+ del self.MessageHandledMap[msgid]
+
+ def _keepalive(self, now):
+ """A periodic handler that sends a keep alive message to all peers.
+
+ Args:
+ now (float): Current time.
+ """
+ if now < self.NextKeepAlive:
+ return
+
+ self.NextKeepAlive = now + self.KeepAliveInterval
+ self.forward_message(connect_message.KeepAliveMessage())
+
+ # Check for nodes with excessive RTTs, for now just report.
+ for node in self.peer_list(True):
+ node.bump_ticks()
+ if node.MissedTicks > 10:
+ logger.info('no messages from node %s in %d ticks, dropping',
+ node, node.MissedTicks)
+ self.drop_node(node.Identifier)
+
+ def _dispatcher(self):
+ while self.ProcessIncomingMessages:
+ msg = self.MessageQueue.get()
+ try:
+ if msg and msg.MessageType in self.MessageHandlerMap:
+ self.MessageHandlerMap[msg.MessageType][1](msg, self)
+
+ # handle the attribute error specifically so that the message type
+ # can be used in the next exception
+ except:
+ logger.exception(
+ 'unexpected error handling message of type %s',
+ msg.MessageType)
+
+ self.MessageQueue.task_done()
+
+ # --------------------------------- ###
+ # Locally defined interface methods ###
+ # --------------------------------- ###
+
+ def shutdown(self):
+ """Handle any shutdown processing.
+
+ Note:
+ Subclasses should override this method.
+ """
+ logger.info(
+ 'send disconnection message to peers in preparation for shutdown')
+
+ self.forward_message(connect_message.DisconnectRequestMessage())
+
+ # We could turn off packet processing at this point but we really need
+ # to leave the socket open long enough to send the disconnect messages
+ # that we just queued up
+ self.ProcessIncomingMessages = False
+ self.MessageQueue.put(None)
+
+ def register_message_handler(self, msg, handler):
+ """Register a function to handle incoming messages for the
+ specified message type.
+
+ Args:
+ msg (type): A type object derived from MessageType.
+ handler (function): Function to be called when messages of
+ that type arrive.
+ """
+ self.MessageHandlerMap[msg.MessageType] = (msg, handler)
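+
+    # A minimal registration sketch (hypothetical handler; any callable
+    # accepting (msg, gossiper) works, since the dispatcher invokes
+    # MessageHandlerMap[type][1](msg, self)):
+    #
+    #   def ping_handler(msg, gossiper):
+    #       logger.info('ping %s', msg.Identifier[:8])
+    #
+    #   gossiper.register_message_handler(PingMessage, ping_handler)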
+
+ def clear_message_handler(self, msg):
+ """Remove any handlers associated with incoming messages for the
+ specified message type.
+
+ Args:
+ msg (type): A type object derived from MessageType.
+ """
+ try:
+ del self.MessageHandlerMap[msg.MessageType]
+        except KeyError:
+ pass
+
+ def get_message_handler(self, msg):
+ """Returns the function registered to handle incoming messages
+ for the specified message type.
+
+ Args:
+ msg (type): A type object derived from MessageType.
+
+ Returns:
+ function: The registered handler function for this message
+ type.
+ """
+ return self.MessageHandlerMap[msg.MessageType][1]
+
+ def unpack_message(self, mtype, minfo):
+ """Unpack a dictionary into a message object using the
+ registered handlers.
+
+ Args:
+ mtype (str): Name of the message type.
+ minfo (dict): Dictionary with message data.
+
+ Returns:
+ The result of the handler called with minfo.
+ """
+ return self.MessageHandlerMap[mtype][0](minfo)
+
+ def add_node(self, peer):
+ """Adds an endpoint to the list of peers known to this node.
+
+ Args:
+ peer (Node): The peer node to add to the node map.
+ """
+ self.NodeMap[peer.Identifier] = peer
+ peer.initialize_stats(self.LocalNode)
+
+ def drop_node(self, peerid):
+ """Drops an endpoint from the list of connected peers
+
+        Args:
+            peerid (str): Identifier of the peer node to remove from the
+                node map.
+ """
+ try:
+ del self.NodeMap[peerid]
+ self.onNodeDisconnect.fire(peerid)
+ except:
+ pass
+
+ def forward_message(self, msg, exceptions=[], initialize=True):
+ """Forward a previously received message on to our peers.
+
+ This is useful for request messages that only need to be
+ forwarded if they cannot be handled locally, but where
+ we do not want to re-process the request.
+
+ Args:
+ msg (Message): The message to forward.
+ exceptions (list): A list of Nodes to exclude from the peer_list.
+ initialize (bool): Whether to initialize the origin fields, used
+ for initial send of the message.
+ """
+
+ if msg.IsForward:
+ logger.warn('Attempt to forward a broadcast message with id %s',
+ msg.Identifier[:8])
+ msg.IsForward = False
+
+ if initialize:
+ msg.sign_from_node(self.LocalNode)
+
+ self._sendmsg(msg, self.peer_id_list(exceptions=exceptions))
+
+ def send_message(self, msg, peerid, initialize=True):
+ """Send an encoded message through the peers to the entire
+ network of participants.
+
+ Args:
+ msg (Message): The message to send.
+            peerid (str): Identifier of the peer node.
+ initialize (bool): Whether to initialize the origin fields, used
+ for initial send of the message.
+ """
+
+ if msg.IsForward:
+ logger.warn('Attempt to unicast a broadcast message with id %s',
+ msg.Identifier[:8])
+ msg.IsForward = False
+
+ if initialize:
+ msg.sign_from_node(self.LocalNode)
+
+ self._sendmsg(msg, [peerid])
+
+ def broadcast_message(self, msg, initialize=True):
+ """Send an encoded message through the peers to the entire network
+ of participants.
+
+ Args:
+ msg (Message): The message to broadcast.
+ initialize (bool): Whether to initialize the origin fields, used
+ for initial send of the message.
+ """
+
+ if not msg.IsForward:
+ logger.warn('Attempt to broadcast a unicast message with id %s',
+ msg.Identifier[:8])
+ msg.IsForward = True
+
+ if initialize:
+ msg.sign_from_node(self.LocalNode)
+
+ self.handle_message(msg)
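+
+    # The three send paths above differ mainly in fan-out (a summary
+    # sketch, not additional API):
+    #
+    #   gossiper.send_message(msg, peerid)   # unicast to a single peer
+    #   gossiper.forward_message(msg)        # send to all peers, no local
+    #                                        # re-processing
+    #   gossiper.broadcast_message(msg)      # handle locally, then flood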
+
+ def handle_message(self, msg):
+ """Handle a message.
+
+ Args:
+ msg (Message): The message to handle.
+ """
+ # mark the message as handled
+ logger.debug('calling handler for message %s from %s of type %s',
+ msg.Identifier[:8], msg.SenderID[:8], msg.MessageType)
+
+        self.MessageHandledMap[msg.Identifier] = (
+            time.time() + self.ExpireMessageTime)
+ self.MessageQueue.put(msg)
+
+ # and now forward it on to the peers if it is marked for forwarding
+ if msg.IsForward and msg.TimeToLive > 0:
+ self._sendmsg(msg, self.peer_id_list(exceptions=[msg.SenderID]))
diff --git a/gossip/message.py b/gossip/message.py
new file mode 100644
index 0000000000..54d601e166
--- /dev/null
+++ b/gossip/message.py
@@ -0,0 +1,243 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+"""
+This module defines the Packet and Message classes, which are responsible
+for representing data transmissions in the gossip protocol.
+"""
+
+import logging
+import struct
+import time
+
+from gossip.common import cbor2dict, dict2cbor
+from gossip.signed_object import SignedObject
+
+logger = logging.getLogger(__name__)
+
+
+class Packet(object):
+ """The Packet class manages the data that goes onto and comes off of
+ the wire.
+
+ Attributes:
+ PackedFormat (str): A struct packed format string representing
+ the packed structure of the header.
+ TimeToLive (int): The maximum number of hops to forward the
+ message.
+ SequenceNumber (int): A monotonically increasing counter used to
+ identify packets.
+ IsAcknowledgement (bool): Whether this packet is an
+ acknowledgement packet.
+ IsReliable (bool): Whether this packet uses reliable delivery.
+ SenderID (str): The identifier for the node that sent this
+ packet.
+ Data (str): The data content of the packet.
+ TransmitTime (float): The time the packet was transmitted, in
+ seconds since the epoch.
+ RoundTripEstimate (float): An estimate of the round trip time
+ to send a message and receive a response.
+ DestinationID (str): The identifier for the node that is
+ intended to receive this packet.
+ Identifier (str): The message identifier.
+ """
+
+ PackedFormat = '!LL??36s'
+
+ def __init__(self):
+ """Constructor for the Packet class.
+ """
+ self.TimeToLive = 255
+ self.SequenceNumber = 0
+ self.IsAcknowledgement = False
+ self.IsReliable = True
+ self.SenderID = '========================'
+
+ self.Message = None
+ self.Data = ''
+
+ # bookkeeping properties
+ self.TransmitTime = 0.0
+ self.RoundTripEstimate = 0.0
+ self.DestinationID = '========================'
+ self.Identifier = None
+
+ def __str__(self):
+ return "PKT:{0}:{1}".format(self.SenderID[:8], self.SequenceNumber)
+
+ def create_ack(self, sender):
+ """Creates a new Packet instance with IsAcknowledgement == True
+ and a sequence number which matches this Packet.
+
+ Args:
+ sender (str): An identifier for the sending node.
+
+ Returns:
+ Packet: An acknowledgement Packet associated with this Packet.
+ """
+ packet = Packet()
+
+ packet.TimeToLive = 0
+ packet.SequenceNumber = self.SequenceNumber
+ packet.IsAcknowledgement = True
+ packet.IsReliable = False
+ packet.SenderID = sender
+
+ packet.Data = ''
+
+ return packet
+
+ def add_message(self, msg, src, dst, seqno):
+ """Resets the Packet with the attributes of the Message.
+
+ Args:
+ msg (Message): The message to apply to the packet.
+ src (Node): The source node of the packet.
+ dst (Node): The destination node of the packet.
+ seqno (int): The sequence number of the packet.
+ """
+ self.IsAcknowledgement = False
+
+ self.Identifier = msg.Identifier
+ self.TimeToLive = msg.TimeToLive
+ self.IsReliable = msg.IsReliable
+ self.SequenceNumber = seqno
+
+ self.SenderID = src.Identifier
+ self.DestinationID = dst.Identifier
+ self.RoundTripEstimate = dst.Estimator.RTO
+
+ self.Message = msg
+ self.Data = repr(msg)
+
+ def unpack(self, databuf):
+ """Resets the Packet with the contents of a packed object.
+
+ Args:
+ databuf (bytes): A packed object with a header conforming
+ to PackedFormat.
+ """
+ size = struct.calcsize(self.PackedFormat)
+
+ (ttl, seqno, aflag, rflag, senderid) = struct.unpack(self.PackedFormat,
+ databuf[:size])
+
+ self.TimeToLive = ttl
+ self.SequenceNumber = int(seqno)
+ self.IsAcknowledgement = aflag
+ self.IsReliable = rflag
+ self.SenderID = senderid.rstrip('\0')
+
+ self.Data = databuf[size:]
+
+ def pack(self):
+ """Builds a packed object with a header conforming to PackedFormat
+ and includes body contents.
+
+ Returns:
+ bytes: A packed object with a header conforming to
+ PackedFormat.
+ """
+ header = struct.pack(self.PackedFormat, self.TimeToLive,
+ self.SequenceNumber, self.IsAcknowledgement,
+ self.IsReliable, str(self.SenderID))
+
+ return header + self.Data
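+
+# A minimal round-trip sketch (illustrative): the packed header is 46 bytes
+# (struct.calcsize('!LL??36s') == 4 + 4 + 1 + 1 + 36), followed by the raw
+# message body.
+#
+#   pkt = Packet()
+#   pkt.Data = 'payload'
+#   buf = pkt.pack()
+#   copy = Packet()
+#   copy.unpack(buf)
+#   assert copy.SequenceNumber == pkt.SequenceNumber
+#   assert copy.Data == 'payload'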
+
+
+def unpack_message_data(data):
+ """Unpacks CBOR encoded data into a dict.
+
+ Args:
+ data (bytes): CBOR encoded data.
+
+ Returns:
+ dict: A dict reflecting the contents of the CBOR encoded
+ representation.
+ """
+ return cbor2dict(data)
+
+
+class Message(SignedObject):
+ """A Message contains the information and metadata to be transmitted to
+ a node.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ DefaultTimeToLive (int): The default number of hops that the
+ message is considered alive.
+ Nonce (float): A locally unique value generated by the message
+ sender.
+ SenderID (str): Identifier for the node that sent the packet in
+ which the message was delivered. The SenderID is the peer node
+ in the gossip network.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ TimeToLive (int): The configured number of hops that the message
+ is considered alive.
+ """
+ MessageType = "/" + __name__ + "/MessageBase"
+ DefaultTimeToLive = 2 ** 31
+
+ def __init__(self, minfo={}):
+ """Constructor for the Message class.
+
+ Args:
+ minfo (dict): dictionary of values for message fields,
+ generally created from a call to dump().
+ """
+ super(Message, self).__init__(minfo, signkey='__SIGNATURE__')
+
+ self.Nonce = minfo.get('__NONCE__', time.time())
+
+ self.SenderID = '========================'
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.TimeToLive = self.DefaultTimeToLive
+
+ self._data = None
+
+ def __str__(self):
+ return "MSG:{0}:{1}".format(self.OriginatorID[:8], self.Identifier[:8])
+
+ def __repr__(self):
+ if not self._data:
+ self._data = self.serialize()
+ return self._data
+
+ def __len__(self):
+ if not self._data:
+ self._data = dict2cbor(self.dump())
+ return len(self._data)
+
+ def dump(self):
+ """Builds a dict containing base object key/values and message type
+ and nonce.
+
+ Returns:
+ dict: a mapping containing information about the message.
+ """
+ result = super(Message, self).dump()
+
+ result['__TYPE__'] = self.MessageType
+ result['__NONCE__'] = self.Nonce
+
+ return result
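+
+# A serialization round trip, as used by the packet plumbing above
+# (illustrative; MyMessage stands in for any Message subclass):
+#
+#   data = repr(msg)                   # serialized message body
+#   minfo = unpack_message_data(data)  # back to a dict
+#   copy = MyMessage(minfo)            # rebuild through the subclass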
diff --git a/gossip/messages/__init__.py b/gossip/messages/__init__.py
new file mode 100644
index 0000000000..e4f0c1b8ee
--- /dev/null
+++ b/gossip/messages/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['connect_message', 'gossip_debug', 'random_walk_message',
+ 'shutdown_message', 'topology_message']
diff --git a/gossip/messages/connect_message.py b/gossip/messages/connect_message.py
new file mode 100644
index 0000000000..9c85395a13
--- /dev/null
+++ b/gossip/messages/connect_message.py
@@ -0,0 +1,317 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module implements classes derived from Message for representing
+connection requests, connection replies, disconnection requests, and
+keep alives. It also defines handler methods to be called when these
+message types arrive.
+"""
+
+import logging
+
+from gossip import common, message, node
+
+logger = logging.getLogger(__name__)
+
+
+def send_connection_request(gossiper, peer):
+ """Sends a connection request message to a peer node.
+
+ Args:
+ gossiper (Node): The local node.
+ peer (Node): The remote node.
+ """
+ logger.info("add node %s, %s, %s", peer, peer.Identifier[:8],
+ peer.NetAddress)
+
+ gossiper.add_node(peer)
+
+ request = ConnectRequestMessage()
+ request.NetHost = gossiper.LocalNode.NetHost
+ request.NetPort = gossiper.LocalNode.NetPort
+ request.Name = gossiper.LocalNode.Name
+
+ gossiper.send_message(request, peer.Identifier)
+
+
+def register_message_handlers(gossiper):
+ """Registers the connection-related message handlers for a node.
+
+ Args:
+ gossiper (Node): The node to register message handlers on.
+ """
+ gossiper.register_message_handler(ConnectRequestMessage,
+ connect_request_handler)
+ gossiper.register_message_handler(ConnectReplyMessage,
+ connect_reply_handler)
+ gossiper.register_message_handler(DisconnectRequestMessage,
+ disconnect_request_handler)
+ gossiper.register_message_handler(KeepAliveMessage, keep_alive_handler)
+
+
+class ConnectRequestMessage(message.Message):
+ """Connection request messages are sent to a peer node to initiate
+ a gossip connection.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Reliable (bool): Whether or not the message requires reliable
+ delivery.
+ NetHost (str): Hostname or IP address identifying the node.
+ NetPort (int): The remote port number to connect to.
+ Name (str): The name of the connection.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/ConnectRequest"
+
+ def __init__(self, minfo={}):
+ """Constructor for the ConnectRequestMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(ConnectRequestMessage, self).__init__(minfo)
+ self.Reliable = False
+
+ self.NetHost = minfo.get('Host', "127.0.0.1")
+ self.NetPort = minfo.get('Port', 0)
+ self.Name = minfo.get('Name')
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ @property
+ def NetAddress(self):
+ """Returns the host and port of the connection request message.
+
+ Returns:
+ ordered pair: (host, port).
+ """
+ return (self.NetHost, self.NetPort)
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(ConnectRequestMessage, self).dump()
+
+ result['Host'] = self.NetHost
+ result['Port'] = self.NetPort
+ result['Name'] = self.Name
+
+ return result
+
+
+def connect_request_handler(msg, gossiper):
+ """Handles connection request events.
+
+ When a connection request message arrives, the requesting node is added
+ as a peer and a reply message is sent.
+
+ Args:
+ msg (Message): The received connection request message.
+ gossiper (Node): The local node.
+ """
+ if msg.SenderID != msg.OriginatorID:
+ logger.error('connection request must originate from peer; %s not %s',
+ msg.OriginatorID, msg.SenderID)
+ return
+
+ name = msg.Name
+ if not name:
+ name = msg.OriginatorID[:8]
+
+ orignode = node.Node(address=msg.NetAddress,
+ identifier=msg.OriginatorID,
+ name=name)
+ orignode.Enabled = True
+ gossiper.add_node(orignode)
+
+ reply = ConnectReplyMessage()
+ reply.InReplyTo = msg.Identifier
+ gossiper.send_message(reply, msg.OriginatorID)
+
+
+class ConnectReplyMessage(message.Message):
+ """Connection reply messages are sent to a peer node in response to
+ an incoming connection request message.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ InReplyTo (str): The node identifier of the originator of the
+ connection request message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/ConnectReply"
+
+ def __init__(self, minfo={}):
+ """Constructor for the ConnectReplyMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(ConnectReplyMessage, self).__init__(minfo)
+ self.InReplyTo = minfo.get('InReplyTo', common.NullIdentifier)
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(ConnectReplyMessage, self).dump()
+ return result
+
+
+def connect_reply_handler(msg, gossiper):
+ """Handles connection reply events.
+
+ When a connection reply message arrives, the replying node is added
+ as a peer.
+
+ Args:
+ msg (Message): The received connection reply message.
+ gossiper (Node): The local node.
+ """
+ logger.info('received connect confirmation from node %s',
+ gossiper.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8]))
+
+ # we have confirmation that this peer is currently up, so add it to our
+ # list
+ if msg.OriginatorID in gossiper.NodeMap:
+ logger.info('mark node %s as enabled',
+ gossiper.NodeMap[msg.OriginatorID])
+ gossiper.NodeMap[msg.OriginatorID].Enabled = True
+
+
+class DisconnectRequestMessage(message.Message):
+ """Disconnection request messages represent a request from a node
+ to disconnect from the gossip network.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Reliable (bool): Whether or not the message requires reliable
+ delivery.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/DisconnectRequest"
+
+ def __init__(self, minfo={}):
+ """Constructor for the DisconnectRequestMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(DisconnectRequestMessage, self).__init__(minfo)
+ self.Reliable = False
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = False
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ return super(DisconnectRequestMessage, self).dump()
+
+
+def disconnect_request_handler(msg, gossiper):
+ """Handles disconnection request events.
+
+    When a disconnection request message arrives, the requesting node is
+    removed as a peer.
+
+ Args:
+ msg (Message): The received disconnection request message.
+ gossiper (Node): The local node.
+ """
+ logger.warn('received disconnect message from node %s',
+ gossiper.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8]))
+
+ # if this node is one of our peers, then drop it
+ if msg.OriginatorID in gossiper.NodeMap:
+ logger.warn('mark peer node %s as disabled',
+ gossiper.NodeMap[msg.OriginatorID])
+ gossiper.drop_node(msg.OriginatorID)
+
+
+class KeepAliveMessage(message.Message):
+ """Keep alive messages represent a request from a node to keep the
+    connection alive.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Reliable (bool): Whether or not the message requires reliable
+ delivery.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/KeepAlive"
+
+ def __init__(self, minfo={}):
+ """Constructor for the KeepAliveMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(KeepAliveMessage, self).__init__(minfo)
+ self.Reliable = False
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = False
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ return super(KeepAliveMessage, self).dump()
+
+
+def keep_alive_handler(msg, gossiper):
+ """Handles keep alive events.
+
+ Args:
+        msg (Message): The received keep alive message.
+ gossiper (Node): The local node.
+ """
+ pass
diff --git a/gossip/messages/gossip_debug.py b/gossip/messages/gossip_debug.py
new file mode 100644
index 0000000000..3e90331253
--- /dev/null
+++ b/gossip/messages/gossip_debug.py
@@ -0,0 +1,375 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module implements classes derived from Message for representing
+debug messages, including pings, dump connections, dump peer stats,
+reset peer stats, dump node stats, and reset stats. It also defines
+handler methods to be called when these message types arrive.
+"""
+
+import logging
+import string
+import time
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(gossiper):
+ """Registers the debug-related message handlers for a node.
+
+ Args:
+ gossiper (Node): The node to register message handlers on.
+ """
+ gossiper.register_message_handler(PingMessage, _pinghandler)
+ gossiper.register_message_handler(DumpConnectionsMessage,
+ _dumpconnectionshandler)
+ gossiper.register_message_handler(DumpPeerStatsMessage, _dumppeerhandler)
+ gossiper.register_message_handler(ResetPeerStatsMessage, _resetpeerhandler)
+ gossiper.register_message_handler(DumpNodeStatsMessage, _dumpstatshandler)
+ gossiper.register_message_handler(ResetStatsMessage, _resetstatshandler)
+
+
+class PingMessage(message.Message):
+ """Ping messages are sent to a peer node to verify connectivity.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/Ping"
+
+ def __init__(self, minfo={}):
+ """Constructor for the PingMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(PingMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = True
+ self.IsForward = True
+ self.IsReliable = True
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(PingMessage, self).dump()
+ return result
+
+
+def _pinghandler(msg, gossiper):
+ logger.warn("ping, %s, %s", time.time(), msg.Identifier[:8])
+
+
+class DumpConnectionsMessage(message.Message):
+ """Dump connections messages are sent to a peer node to request
+ it to log enabled and disabled connections information.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/DumpConnections"
+
+ def __init__(self, minfo={}):
+ """Constructor for the DumpConnectionsMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(DumpConnectionsMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(DumpConnectionsMessage, self).dump()
+ return result
+
+
+def _dumpconnectionshandler(msg, gossiper):
+ identifier = "{0}, {1:0.2f}, {2}".format(gossiper.LocalNode, time.time(),
+ msg.Identifier[:8])
+
+ enabled = []
+ disabled = []
+ for peer in gossiper.peer_list(allflag=True):
+ if peer.Enabled:
+ enabled.append(peer.Name)
+ else:
+ disabled.append(peer.Name)
+
+ logger.info("connections, %s, enabled, %s", identifier,
+ string.join(enabled, ', '))
+ logger.info("connections, %s, disabled, %s", identifier,
+ string.join(disabled, ', '))
+
+
+class DumpPeerStatsMessage(message.Message):
+ """Dump peer stats messages are sent to a peer node to request
+ it to log statistics about specified peer connections.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ PeerIDList (list): A list of peers to dump stats for.
+        MetricList (list): A list of stats to dump.
+ """
+ MessageType = "/" + __name__ + "/DumpPeerStats"
+
+ def __init__(self, minfo={}):
+ """Constructor for the DumpPeerStatsMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(DumpPeerStatsMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.PeerIDList = minfo.get('PeerIDList', [])
+ self.MetricList = minfo.get('MetricList', [])
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(DumpPeerStatsMessage, self).dump()
+
+ result['PeerIDList'] = []
+ for peerid in self.PeerIDList:
+ result['PeerIDList'].append(peerid)
+
+ result['MetricList'] = []
+        for metric in self.MetricList:
+            result['MetricList'].append(metric)
+
+ return result
+
+
+def _dumppeerhandler(msg, gossiper):
+ idlist = msg.PeerIDList
+ if len(idlist) == 0:
+ idlist = gossiper.peer_id_list()
+
+ for peer in gossiper.NodeMap.itervalues():
+ if peer.Identifier in idlist or peer.Name in idlist:
+ if peer.Enabled:
+ peer.dump_peer_stats(msg.Identifier, msg.MetricList)
+
+
+class ResetPeerStatsMessage(message.Message):
+ """Reset peer stats messages are sent to a peer node to request
+ it to reset statistics about specified peer connections.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ PeerIDList (list): A list of peers to reset stats for.
+        MetricList (list): A list of stats to reset.
+ """
+ MessageType = "/" + __name__ + "/ResetPeerStats"
+
+ def __init__(self, minfo={}):
+ """Constructor for the ResetPeerStatsMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(ResetPeerStatsMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.PeerIDList = minfo.get('PeerIDList', [])
+ self.MetricList = minfo.get('MetricList', [])
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(ResetPeerStatsMessage, self).dump()
+
+ result['PeerIDList'] = []
+ for peerid in self.PeerIDList:
+ result['PeerIDList'].append(peerid)
+
+ result['MetricList'] = []
+        for metric in self.MetricList:
+            result['MetricList'].append(metric)
+
+ return result
+
+
+def _resetpeerhandler(msg, gossiper):
+ idlist = msg.PeerIDList
+ if len(idlist) == 0:
+ idlist = gossiper.peer_id_list()
+
+ for peer in gossiper.NodeMap.itervalues():
+ if peer.Identifier in idlist or peer.Name in idlist:
+ if peer.Enabled:
+ peer.reset_peer_stats(msg.MetricList)
+
+
+class DumpNodeStatsMessage(message.Message):
+ """Dump node stats messages are sent to a peer node to request
+ it to dump statistics.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ DomainList (list): A list of domains to dump stats for.
+ MetricList (list): A list of stats to dump.
+ """
+ MessageType = "/" + __name__ + "/DumpNodeStats"
+
+ def __init__(self, minfo={}):
+ """Constructor for the DumpNodeStatsMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(DumpNodeStatsMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.DomainList = minfo.get('DomainList', [])
+ self.MetricList = minfo.get('MetricList', [])
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(DumpNodeStatsMessage, self).dump()
+
+ result['DomainList'] = []
+ for domain in self.DomainList:
+ result['DomainList'].append(domain)
+
+ result['MetricList'] = []
+ for metric in self.MetricList:
+ result['MetricList'].append(metric)
+
+ return result
+
+
+def _dumpstatshandler(msg, gossiper):
+ domains = gossiper.StatDomains.keys() if len(
+ msg.DomainList) == 0 else msg.DomainList
+ for domain in domains:
+ if domain in gossiper.StatDomains:
+ gossiper.StatDomains[domain].dump_stats(msg.Identifier,
+ msg.MetricList)
+
+
+class ResetStatsMessage(message.Message):
+ """Reset stats messages are sent to a peer node to request
+ it to reset statistics.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ DomainList (list): A list of domains to reset stats for.
+ MetricList (list): A list of stats to reset.
+ """
+ MessageType = "/" + __name__ + "/ResetStats"
+
+ def __init__(self, minfo={}):
+ """Constructor for the ResetStatsMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(ResetStatsMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.DomainList = minfo.get('DomainList', [])
+ self.MetricList = minfo.get('MetricList', [])
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(ResetStatsMessage, self).dump()
+
+ result['DomainList'] = []
+ for domain in self.DomainList:
+ result['DomainList'].append(domain)
+
+ result['MetricList'] = []
+ for metric in self.MetricList:
+ result['MetricList'].append(metric)
+
+ return result
+
+
+def _resetstatshandler(msg, gossiper):
+ domains = gossiper.StatDomains.keys() if len(
+ msg.DomainList) == 0 else msg.DomainList
+ for domain in domains:
+ if domain in gossiper.StatDomains:
+ gossiper.StatDomains[domain].reset_stats(msg.MetricList)
diff --git a/gossip/messages/random_walk_message.py b/gossip/messages/random_walk_message.py
new file mode 100644
index 0000000000..de7e9a1289
--- /dev/null
+++ b/gossip/messages/random_walk_message.py
@@ -0,0 +1,190 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module implements a Message-derived class for handling random walk
+messages. Random walk messages support extending the connectivity of the
+gossip network via the random walk topology.
+"""
+
+import logging
+import random
+
+from gossip.messages.connect_message import send_connection_request
+from gossip import message, node
+
+logger = logging.getLogger(__name__)
+
+MaxNumberOfConnections = 11
+
+
+def send_random_walk_message(gossiper):
+ """Sends a random walk message to a random peer.
+
+ Args:
+ gossiper (Node): The local node.
+ """
+ msg = RandomWalkMessage()
+ msg.NetHost = gossiper.LocalNode.NetHost
+ msg.NetPort = gossiper.LocalNode.NetPort
+ msg.NodeIdentifier = gossiper.LocalNode.Identifier
+ msg.Name = gossiper.LocalNode.Name
+
+ peers = gossiper.peer_id_list()
+ if len(peers) > 0:
+ peerid = random.choice(peers)
+ gossiper.send_message(msg, peerid)
+
+
+def register_message_handlers(gossiper):
+ """Registers the random-walk related message handlers for a node.
+
+ Args:
+ gossiper (Node): The node to register message handlers on.
+ """
+ gossiper.register_message_handler(RandomWalkMessage, random_walk_handler)
+
+
+class RandomWalkMessage(message.Message):
+ """Random walk messages are sent to a random peer to extend the
+ connectivity of the network.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ NetHost (str): Hostname or IP address identifying the node.
+ NetPort (int): The remote port number to connect to.
+ NodeIdentifier (str): The identifier of the originating node.
+ Name (str): The name of the connection.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ TimeToLive (int): How many 'steps' there are in the random walk.
+ When a random walk message is received, the TimeToLive value
+ is decremented and the message is retransmitted from the
+            receiving node. This continues until TimeToLive reaches zero.
+ """
+ MessageType = "/" + __name__ + "/Topology/RandomWalk"
+
+ def __init__(self, minfo={}):
+ """Constructor for the RandomWalkMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(RandomWalkMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.NetHost = minfo.get('Host', "127.0.0.1")
+ self.NetPort = minfo.get('Port', 0)
+ self.NodeIdentifier = minfo.get('NodeIdentifier', '')
+ self.Name = minfo.get('Name', self.NodeIdentifier[:8])
+
+ self.TimeToLive = 8
+
+ @property
+ def NetAddress(self):
+ """Returns the host and port of the connection request message.
+
+ Returns:
+ ordered pair: (host, port).
+ """
+ return (self.NetHost, self.NetPort)
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(RandomWalkMessage, self).dump()
+
+ result['Host'] = self.NetHost
+ result['Port'] = self.NetPort
+ result['NodeIdentifier'] = self.NodeIdentifier
+ result['Name'] = self.Name
+
+ return result
+
+
+def random_connections():
+ """Determines how many random connections the node should attempt
+ to connect to.
+
+ Returns:
+ int: The number of random connections to attempt.
+ """
+ count = 0
+ value = random.randint(1, pow(2, MaxNumberOfConnections))
+ while value > 0:
+ value >>= 1
+ count += 1
+
+ return count
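+
+# A worked consequence of the draw above: count is the bit length of a
+# uniform draw from [1, 2**11], so about half of all draws (those in
+# [1024, 2047]) yield 11, a quarter yield 10, and so on down to 1; the
+# single draw of exactly 2**11 yields 12.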
+
+
+def random_walk_handler(msg, gossiper):
+ """Function called when the gossiper receives a RandomWalkMessage
+ from one of its peers.
+
+ Args:
+ msg (Message): The received random walk message.
+ gossiper (Node): The local node.
+ """
+
+ if msg.OriginatorID == gossiper.LocalNode.Identifier:
+ logger.debug('node %s received its own random walk request, ignore',
+ gossiper.LocalNode)
+ return
+
+ logger.debug('random walk request %s from %s with ttl %d',
+ msg.Identifier[:8], msg.Name, msg.TimeToLive)
+
+ peers = gossiper.peer_id_list()
+
+ # if the source is not already one of our peers, then check to see if we
+ # should add it to our list
+ if msg.OriginatorID not in peers:
+ if len(peers) < random_connections():
+ logger.debug(
+ 'add connection to node %s based on random walk request %s',
+ msg.Name, msg.Identifier[:8])
+ onode = node.Node(address=msg.NetAddress,
+ identifier=msg.NodeIdentifier,
+ name=msg.Name)
+ onode.Enabled = True
+
+ send_connection_request(gossiper, onode)
+ return
+
+ # if there is still life in the message, then see if we should forward it
+ # to another node
+
+ if msg.TimeToLive > 0:
+ # see if we can find a peer other than the peer who forwarded the
+ # message to us, if not then we'll just drop the request
+
+        for nodeid in (msg.SenderID, msg.OriginatorID):
+            if nodeid in peers:
+                peers.remove(nodeid)
+
+ if len(peers) > 0:
+ peerid = random.choice(peers)
+ gossiper.send_message(msg, peerid, initialize=False)
diff --git a/gossip/messages/shutdown_message.py b/gossip/messages/shutdown_message.py
new file mode 100644
index 0000000000..d384844b95
--- /dev/null
+++ b/gossip/messages/shutdown_message.py
@@ -0,0 +1,123 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module implements a ShutdownMessage class derived from Message for
+representing shutdown messages. It also defines a handler method for taking
+action when shutdown messages are received.
+"""
+
+import logging
+
+from twisted.internet import reactor
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+# Unless this is set, shutdown messages are ignored; it should be set to
+# the identifier of the only node that is allowed to execute a shutdown
+
+AdministrationNode = None
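+
+# For example (hypothetical wiring done by the application at startup):
+#
+#   from gossip.messages import shutdown_message
+#   shutdown_message.AdministrationNode = adminnode.Identifier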
+
+
+def register_message_handlers(gossiper):
+ """Registers the shutdown-related message handlers for a node.
+
+ Args:
+ gossiper (Node): The node to register message handlers on.
+ """
+ gossiper.register_message_handler(ShutdownMessage, shutdown_handler)
+
+
+class ShutdownMessage(message.Message):
+ """Shutdown messages are sent to a peer node to initiate shutdown.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ NodeList (list): The list of nodes to shutdown.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/ShutdownMessage"
+
+ def __init__(self, minfo={}):
+ """Constructor for the ShutdownMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(ShutdownMessage, self).__init__(minfo)
+
+        # The list of identifiers for the nodes to shut down; an empty
+        # list means every receiving node should shut down
+ self.NodeList = minfo.get('NodeList', [])
+
+ # We are not going to hang around waiting for acks to come back
+ self.IsSystemMessage = True
+ self.IsForward = True
+ self.IsReliable = False
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(ShutdownMessage, self).dump()
+ result['NodeList'] = self.NodeList
+
+ return result
+
+
+def shutdown_handler(msg, gossiper):
+ """Handles shutdown events.
+
+ When a shutdown message arrives, the node checks to see if it is
+ included in the shutdown list and, if so, shuts down.
+
+ Args:
+        msg (Message): The received shutdown request message.
+ gossiper (Node): The local node.
+ """
+ if msg.OriginatorID != AdministrationNode:
+ logger.warn(
+ 'shutdown received from non-administrator; received from %s, '
+ 'expecting %s',
+ msg.OriginatorID, AdministrationNode)
+ return
+
+ if msg.NodeList and gossiper.LocalNode.Identifier not in msg.NodeList:
+ logger.warn('this node not included in shutdown list, %s',
+ msg.NodeList)
+ return
+
+ # Need to wait long enough for all the shutdown packets to be sent out
+    logger.warn('shutdown message received from %s', msg.OriginatorID)
+ reactor.callLater(1.0, shutdown, gossiper)
+
+
+def shutdown(gossiper):
+ """Callback for node shutdown.
+
+ Shuts down the gossip networking locally and stops the main event loop.
+
+ Args:
+ gossiper (Node): The local node.
+ """
+ gossiper.shutdown()
+ reactor.stop()
diff --git a/gossip/messages/topology_message.py b/gossip/messages/topology_message.py
new file mode 100644
index 0000000000..77f1eaa0b1
--- /dev/null
+++ b/gossip/messages/topology_message.py
@@ -0,0 +1,242 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import uuid
+
+from twisted.internet import reactor
+
+from gossip import message, node
+
+logger = logging.getLogger(__name__)
+
+TimeToWaitForTopologyProbe = 2.0
+CurrentTopologyRequestID = None
+CurrentTopologyResponseMap = {}
+
+
+def initiate_topology_probe(gossiper, callback):
+ """Broadcasts a topology message and establishes a callback timer
+ to handle the responses.
+
+ Args:
+ gossiper (Node): The local node.
+ callback (function): The callback argument to the
+ update_peers_from_topology_probe method.
+ """
+ global CurrentTopologyRequestID, CurrentTopologyResponseMap
+
+    # if there is a request being processed, then don't initiate another
+ if CurrentTopologyRequestID:
+ return
+
+ request = TopologyRequestMessage()
+ CurrentTopologyRequestID = request.Identifier
+
+ gossiper.broadcast_message(request)
+ reactor.callLater(TimeToWaitForTopologyProbe,
+ update_peers_from_topology_probe,
+ gossiper,
+ callback)
+
+
+def update_peers_from_topology_probe(gossiper, callback):
+ """Calls the passed in callback and resets global variables.
+
+ Args:
+ gossiper (Node): The local node.
+ callback (function): The function which should be called.
+ """
+ global CurrentTopologyRequestID, CurrentTopologyResponseMap
+
+ if callback:
+ callback(gossiper, CurrentTopologyResponseMap)
+
+ CurrentTopologyRequestID = None
+ CurrentTopologyResponseMap = {}
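+
+# A minimal callback sketch (hypothetical name): the callback receives the
+# gossiper and the {Node: peer list} map accumulated from the replies.
+#
+#   def probe_done(gossiper, responsemap):
+#       for peer, peers in responsemap.iteritems():
+#           logger.info('node %s reports %d peers', peer, len(peers))
+#
+#   initiate_topology_probe(gossiper, probe_done)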
+
+
+def register_message_handlers(gossiper):
+ """Registers the topology-related message handlers for a node.
+
+ Args:
+ gossiper (Node): The node to register message handlers on.
+ """
+ gossiper.register_message_handler(TopologyRequestMessage,
+ topology_request_handler)
+ gossiper.register_message_handler(TopologyReplyMessage,
+ topology_reply_handler)
+
+
+class TopologyRequestMessage(message.Message):
+ """Topology request messages are sent to peer nodes to query the
+ connectivity of the peer nodes.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ """
+ MessageType = "/" + __name__ + "/ToplogyRequest"
+
+ def __init__(self, minfo={}):
+ """Constructor for the TopologyRequestMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(TopologyRequestMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.TimeToLive = 2
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(TopologyRequestMessage, self).dump()
+ return result
+
+
+def topology_request_handler(msg, gossiper):
+ """Handles incoming topology request messages.
+
+ Args:
+ msg (Message): The incoming topology message.
+ gossiper (Node): The local node.
+ """
+ logger.debug('responding to probe %s from node %s', msg.Identifier[:8],
+ msg.OriginatorID[:8])
+
+ if msg.OriginatorID == gossiper.LocalNode.Identifier:
+ logger.debug('node %s received its own topology request, ignore',
+ gossiper.LocalNode.Identifier[:8])
+ return
+
+ reply = TopologyReplyMessage()
+ reply.NetHost = gossiper.LocalNode.NetHost
+ reply.NetPort = gossiper.LocalNode.NetPort
+ reply.NodeIdentifier = gossiper.LocalNode.Identifier
+ reply.Name = gossiper.LocalNode.Name
+ reply.InReplyTo = msg.Identifier
+
+ for peer in gossiper.peer_list():
+ if peer.Enabled:
+ reply.Peers.append((peer.Identifier, peer.NetAddress))
+
+ gossiper.broadcast_message(reply)
+
+
+class TopologyReplyMessage(message.Message):
+ """Topology reply messages are sent in response to topology
+ request messages.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this is a system message.
+ System messages have special delivery priority rules.
+ IsForward (bool): Whether the message should be automatically
+ forwarded.
+ IsReliable (bool): Whether reliable delivery is required.
+ NetHost (str): Hostname or IP address identifying the node.
+ NetPort (int): The remote port number to connect to.
+ NodeIdentifier (str): The identifier of the remote node.
+ Name (str): The name of the originator.
+ Peers (list): A list of peers in the topology response.
+ InReplyTo (str): The identifier of the associated topology
+ request message.
+ """
+ MessageType = "/" + __name__ + "/TopologyReply"
+
+ def __init__(self, minfo={}):
+ super(TopologyReplyMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.NetHost = minfo.get('Host', "127.0.0.1")
+ self.NetPort = minfo.get('Port', 0)
+ self.NodeIdentifier = minfo.get('NodeIdentifier', '')
+ self.Name = minfo.get('Name', self.OriginatorID[:8])
+
+ self.Peers = minfo.get('Peers', [])
+ self.InReplyTo = minfo.get('InReplyTo', str(uuid.UUID(int=0)))
+
+ @property
+ def NetAddress(self):
+ """Returns the host and port of the topology reply message.
+
+ Returns:
+ ordered pair: (host, port).
+ """
+ return (self.NetHost, self.NetPort)
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ A mapping of object attribute names to values.
+ """
+ result = super(TopologyReplyMessage, self).dump()
+
+ result['Host'] = self.NetHost
+ result['Port'] = self.NetPort
+ result['NodeIdentifier'] = self.NodeIdentifier
+ result['Name'] = self.Name
+
+ result['Peers'] = self.Peers
+ result['InReplyTo'] = self.InReplyTo
+
+ return result
+
+
+def topology_reply_handler(msg, gossiper):
+ """Handles incoming topology reply messages.
+
+ Args:
+ msg (Message): The incoming topology message.
+ gossiper (Node): The local node.
+ """
+ logger.debug('received reply to probe %s from node %s', msg.InReplyTo[:8],
+ msg.Name)
+
+ global CurrentTopologyRequestID, CurrentTopologyResponseMap
+
+ # Because of the multiple paths through the overlay network, the topology
+ # request can arrive after replies have started to arrive so we initialize
+ # the current set of requests with the replies that come in
+ if not CurrentTopologyRequestID:
+ CurrentTopologyRequestID = msg.InReplyTo
+ reactor.callLater(TimeToWaitForTopologyProbe,
+ update_peers_from_topology_probe, gossiper, None)
+
+ if msg.InReplyTo != CurrentTopologyRequestID:
+ logger.debug('reply for a different probe, %s instead of %s',
+ msg.InReplyTo[:8], CurrentTopologyRequestID[:8])
+ return
+
+ peer = node.Node(address=msg.NetAddress,
+ identifier=msg.NodeIdentifier,
+ name=msg.Name)
+ CurrentTopologyResponseMap[peer] = msg.Peers
diff --git a/gossip/node.py b/gossip/node.py
new file mode 100644
index 0000000000..1ca1a38dbd
--- /dev/null
+++ b/gossip/node.py
@@ -0,0 +1,424 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module defines the Node class for the Gossip protocol and the
+RoundTripEstimator and TransmissionQueue classes, both of which are used
+by the Node implementation.
+"""
+
+import logging
+import random
+import time
+from heapq import heappop, heappush, heapify
+
+from gossip import stats
+from gossip import token_bucket
+
+logger = logging.getLogger(__name__)
+
+
+class Node(object):
+ """The Node class represents network peers in the gossip protocol.
+
+ Attributes:
+ NetHost (str): hostname or IP address identifying the node.
+ SigningKey (str): a PEM formatted signing key.
+ Identifier (str): an identifier for the node.
+ Name (str): a short, human-readable name for the node.
+ Enabled (bool): whether or not the node is active. This is set from
+ outside the Node class.
+ Estimator (RoundTripEstimator): tracks network timing between nodes.
+ MessageQ (TransmissionQueue): a transmission queue ordered by time
+ to send.
+ TokenBucket (token_bucket): limits the average rate of data flow.
+ FixedRandomDelay (float): a random delay in the range of DelayRange.
+ Delay (float): a random delay for the node using either a uniform
+ or an exponential distribution depending on the value of the
+ UseFixedDelay boolean. By default, a uniform random distribution
+ is used.
+ Stats (stats): tracks statistics associated with node communication.
+ MissedTicks (int): tracks the number of time slices where no messages
+ are received from the node. If MissedTicks exceeds 10, the node
+ is considered disconnected (see Gossip._keepalive()).
+ UseFixedDelay (bool): whether or not to use a uniform or exponential
+ random distribution. If UseFixedDelay is True (default), a
+ uniform distribution in DelayRange is used.
+ DelayRange (list of floats): specifies the floor and ceiling for
+ the uniform random value of FixedRandomDelay.
+ DistributionLambda (float): the lambda value provided to the
+            exponential random function if UseFixedDelay is False.
+
+ """
+ UseFixedDelay = True
+ DelayRange = [0.1, 0.4]
+ DistributionLambda = 10.0
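+
+    # With the defaults above a node's delay is a fixed value drawn
+    # uniformly from [0.1, 0.4] seconds; if UseFixedDelay is False each
+    # call draws afresh from an exponential distribution with mean
+    # 1 / DistributionLambda = 0.1 seconds.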
+
+ def __init__(self,
+ address=(None, None),
+ identifier=None,
+ signingkey=None,
+ name=None,
+ rate=None,
+ capacity=None):
+ """Constructor for the Node class.
+
+ Args:
+ address (ordered pair of str): address of the node in the form
+ of (host, port).
+ identifier (str): an identifier for the node.
+ signingkey (str): used to create a signing key, in PEM format.
+ name (str): a short, human-readable name for the node.
+ rate (int): the number of tokens to be added to the TokenBucket
+ per drip.
+ capacity (int): the total capacity of tokens in the node's
+ TokenBucket.
+ """
+
+ self.NetHost = address[0]
+ self.NetPort = address[1]
+
+ self.SigningKey = signingkey
+ self.Identifier = identifier
+
+ self.Name = name if name else self.Identifier[:8]
+ self.Enabled = False
+
+ self.Estimator = RoundTripEstimator()
+ self.MessageQ = TransmissionQueue()
+ self.TokenBucket = token_bucket.TokenBucket(rate, capacity)
+
+ self.FixedRandomDelay = random.uniform(*self.DelayRange)
+ if self.UseFixedDelay:
+ self.Delay = self._fixeddelay
+ else:
+ self.Delay = self._randomdelay
+ self.Stats = None
+
+ self.MissedTicks = 0
+
+ @property
+ def NetAddress(self):
+ """Returns an ordered pair containing the host and port number of
+ the node.
+ """
+ return (self.NetHost, self.NetPort)
+
+ def __str__(self):
+ return self.Name
+
+ def _randomdelay(self):
+ return random.expovariate(self.DistributionLambda)
+
+ def _fixeddelay(self):
+ return self.FixedRandomDelay
+
+ def initialize_stats(self, localnode):
+ """Initializes statistics collection for the node.
+
+ Args:
+ localnode (Node): the local node. Statistics are relative to
+ the local node and the remote node.
+ """
+ self.Stats = stats.Stats(localnode.Name, self.Name)
+ self.Stats.add_metric(stats.Value('Identifier', self.Identifier))
+ self.Stats.add_metric(stats.Value('Address', "{0}:{1}".format(
+ self.NetHost, self.NetPort)))
+ self.Stats.add_metric(stats.Sample('Enabled', lambda: self.Enabled))
+ self.Stats.add_metric(stats.Sample('MessageQueue',
+ lambda: str(self.MessageQ)))
+ self.Stats.add_metric(stats.Sample('MessageQueueLength',
+ lambda: self.MessageQ.Count))
+ self.Stats.add_metric(stats.Sample('RoundTripEstimate',
+ lambda: self.Estimator.RTO))
+
+ def enqueue_message(self, msg, now):
+ """Enqueue a message for future delivery.
+
+ System messages are queued for immediate delivery, others are
+ queued at some point in the future determined by the configured
+ delay.
+
+ Args:
+ msg (message): the message to enqueue.
+ now (float): the current time.
+
+ """
+ timetosend = 0 if msg.IsSystemMessage else now + self.Delay()
+ self.MessageQ.enqueue_message(msg, timetosend)
+
+ def dequeue_message(self, msg):
+ """Remove a message from the transmission queue.
+
+ Args:
+ msg (message): the message to remove.
+
+ """
+ return self.MessageQ.dequeue_message(msg)
+
+ def get_next_message(self, now):
+ """Removes the next sendable message from the queue and returns it.
+
+ A message is sendable if it is a system message or if there are
+ sufficient tokens in the token bucket to support the length of the
+ message.
+
+ Args:
+ now (float): the current time.
+
+ Returns:
+ message: if a sendable message is found it is returned,
+ otherwise None
+
+ """
+ if self.MessageQ.Count > 0:
+ info = self.MessageQ.Head
+ if info is None:
+ return None
+
+ (timetosend, msg) = info
+ if timetosend < now:
+ if msg.IsSystemMessage or self.TokenBucket.consume(len(msg)):
+ self.MessageQ.dequeue_message(msg)
+ return msg
+
+ return None
+
+ def message_delivered(self, msg, rtt):
+ """Updates the RoundTripEstimator based on packet round trip
+ time and dequeues the specified message.
+
+ Args:
+ msg (message): the message to remove.
+ rtt (int): round trip time between outgoing packet and
+ incoming packet.
+ """
+ self.Estimator.update(rtt)
+ self.MessageQ.dequeue_message(msg)
+
+ def message_dropped(self, msg, now=None):
+ """Updates the RoundTripEstimator based on the assertion that
+ the message has been dropped and re-enqueues the outgoing
+ message for re-delivery.
+
+ Args:
+ msg (message): the message to re-send.
+ now (int): current time since the epoch in seconds.
+ """
+ if not now:
+ now = time.time()
+
+ self.Estimator.backoff()
+ self.enqueue_message(msg, now)
+
+ def reset_ticks(self):
+ """Resets the MissedTicks counter to zero.
+ """
+ self.MissedTicks = 0
+
+ def bump_ticks(self):
+ """Increments the MissedTicks counter.
+ """
+ self.MissedTicks += 1
+
+ def dump_peer_stats(self, identifier, metrics):
+ """Dumps statistics for the node to the log.
+
+ Args:
+ identifier (str): the batchid for logging statistics.
+ metrics (list of str): a list of metrics to dump.
+ """
+ self.Stats.dump_stats(identifier, metrics)
+
+ def reset_peer_stats(self, metrics):
+ """Resets statistics for the node.
+
+ Args:
+ metrics (list of str): a list of metrics to reset.
+ """
+ self.Stats.reset_stats(metrics)
+
+ def _clone(self):
+ """Create a copy of the node, primarily useful for debugging
+ multiple instances of a gossiper in one process.
+ """
+        return Node(address=self.NetAddress, identifier=self.Identifier)
+
+
+class RoundTripEstimator(object):
+ # Minimum and Maximum RTO measured in seconds
+ MinimumRTO = 1.0
+ MaximumRTO = 60.0
+ BackoffRate = 2.0
+
+ MinResolution = 0.025
+ ALPHA = 0.125
+ BETA = 0.25
+ K = 4.0
+
+ def __init__(self):
+ self.RTO = self.MinimumRTO
+ self._SRTT = 0.0
+ self._RTTVAR = 0.0
+
+ def update(self, measuredrto):
+ """Updates estimator values based on measured round trip message
+ time.
+
+ Args:
+ measuredrto (int): actual time from packet transmission to
+ ack reception.
+ """
+
+ if self._RTTVAR == 0.0:
+ self._SRTT = measuredrto
+ self._RTTVAR = measuredrto * 0.5
+ else:
+ self._RTTVAR = (1.0 - self.BETA) * self._RTTVAR + self.BETA * abs(
+ self._SRTT - measuredrto)
+ self._SRTT = (1.0 -
+ self.ALPHA) * self._SRTT + self.ALPHA * measuredrto
+
+ self.RTO = self._SRTT + max(self.MinResolution, self.K * self._RTTVAR)
+ self.RTO = max(self.MinimumRTO, min(self.MaximumRTO, self.RTO))
+
+    def backoff(self):
+        """Resets the round trip estimates and doubles the
+        retransmission timeout (RTO), capped at MaximumRTO.
+        """
+ self._SRTT = 0.0
+ self._RTTVAR = 0.0
+
+ self.RTO = min(self.RTO * self.BackoffRate, self.MaximumRTO)
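
The update()/backoff() pair above follows the familiar Jacobson-style
smoothing used for TCP retransmission timers (SRTT/RTTVAR with
ALPHA=0.125, BETA=0.25, K=4). A minimal sketch of how the estimate
evolves, using made-up round trip samples:

    ALPHA, BETA, K = 0.125, 0.25, 4.0
    MIN_RTO, MAX_RTO, MIN_RESOLUTION = 1.0, 60.0, 0.025

    srtt = rttvar = 0.0
    for rtt in [1.2, 1.4, 0.9, 3.0, 1.1]:   # hypothetical samples (seconds)
        if rttvar == 0.0:
            # the first sample seeds the estimate, as in update() above
            srtt, rttvar = rtt, rtt * 0.5
        else:
            rttvar = (1.0 - BETA) * rttvar + BETA * abs(srtt - rtt)
            srtt = (1.0 - ALPHA) * srtt + ALPHA * rtt
        rto = srtt + max(MIN_RESOLUTION, K * rttvar)
        rto = max(MIN_RTO, min(MAX_RTO, rto))
        print("rtt=%0.2f -> rto=%0.3f" % (rtt, rto))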
+
+
+class TransmissionQueue(object):
+ """Implements a transmission queue ordered by time to send. A
+ heap is used to order message identifiers by transmission time.
+
+ Note:
+ The heap is not authoritative. Because messages can be queued
+ and dequeued, elements in the heap might become out of date.
+ """
+
+ def __init__(self):
+ self._messages = {}
+ self._times = {} # this allows reinsertion of a message
+ self._heap = []
+
+ def __str__(self):
+ idlist = self._times.keys()
+ if len(idlist) > 4:
+ idlist = idlist[:4]
+ idlist.append('...')
+
+ return '[' + ', '.join(map(lambda id: id[:8], idlist)) + ']'
+
+ def enqueue_message(self, msg, timetosend):
+ """Adds a message to the transmission queue.
+
+        At most one instance of a message can exist in the queue at a
+        time; however, multiple references may exist in the heap.
+
+ Args:
+ msg (message): the message to send.
+ timetosend (float): python time when message should be sent,
+ 0 for system message.
+ """
+ messageid = msg.Identifier
+ assert messageid not in self._messages
+ assert messageid not in self._times
+
+ self._messages[messageid] = msg
+ self._times[messageid] = timetosend
+
+ heappush(self._heap, (timetosend, messageid))
+
+ def dequeue_message(self, msg):
+ """Removes a message from the transmission queue if it exists.
+
+ Rebuild the heap if necessary, but do not explicitly remove
+ the entry from the heap.
+
+ Args:
+ msg (message): the message to remove.
+ """
+
+ self._messages.pop(msg.Identifier, None)
+ self._times.pop(msg.Identifier, None)
+
+ self._buildheap()
+
+ @property
+ def Head(self):
+ """Returns the next message in the transmission queue and the time
+ when it should be sent.
+ """
+ self._trimheap()
+ if len(self._heap) == 0:
+ return None
+
+ (timetosend, messageid) = self._heap[0]
+ assert messageid in self._messages
+ assert messageid in self._times
+
+ return (timetosend, self._messages[messageid])
+
+ @property
+ def Count(self):
+ """Returns a count of the number of messages in the queue.
+ """
+ return len(self._times)
+
+ @property
+ def Messages(self):
+ """Returns a list of the message identifiers in the queue, primarily
+ used for debugging.
+ """
+ return self._times.keys()
+
+ def _trimheap(self):
+ """
+ Remove entries in the heap that are no longer valid. Since the heap
+ is not rebuilt when messages are dequeued, there may be invalid
+ entries in the heap.
+ """
+
+ while True:
+ # make sure we haven't emptied the heap
+ if len(self._heap) == 0:
+ return
+ (timetosend, messageid) = self._heap[0]
+
+            # and see if the pair in the heap holds the current
+            # transmission time for the message id
+ if (messageid in self._times
+ and self._times[messageid] == timetosend):
+ assert messageid in self._messages
+ return
+
+ heappop(self._heap)
+
+ def _buildheap(self):
+ """
+ Rebuild the heap if necessary. This should only happen when
+ a large number of messages have been dequeued
+ """
+
+ if 2 * len(self._times) < len(self._heap):
+ self._heap = []
+ for messageid, timetosend in self._times.iteritems():
+ assert messageid in self._messages
+ self._heap.append((timetosend, messageid))
+
+ heapify(self._heap)
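
A sketch of the lazy-deletion behavior, assuming the TransmissionQueue
above is in scope and using a minimal stand-in for the message type
(only the Identifier attribute is required):

    class StubMessage(object):
        def __init__(self, identifier):
            self.Identifier = identifier

    q = TransmissionQueue()
    m1, m2 = StubMessage('m1' * 16), StubMessage('m2' * 16)
    q.enqueue_message(m1, 5.0)
    q.enqueue_message(m2, 2.0)
    assert q.Head[1] is m2            # earliest time wins

    # Dequeue m2 and re-enqueue it later. The (2.0, m2) heap entry is now
    # stale; _trimheap skips it the next time Head is read because it no
    # longer matches the time recorded in _times.
    q.dequeue_message(m2)
    q.enqueue_message(m2, 9.0)
    assert q.Head[1] is m1
    assert q.Count == 2
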
diff --git a/gossip/signed_object.py b/gossip/signed_object.py
new file mode 100644
index 0000000000..49575873d8
--- /dev/null
+++ b/gossip/signed_object.py
@@ -0,0 +1,252 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+"""
+This module defines the SignedObject class which processes and validates
+objects signed by a signing key.
+"""
+
+import hashlib
+import logging
+
+import pybitcointools
+
+from gossip.ECDSA import ECDSARecoverModule as nativeECDSA
+from gossip.common import dict2cbor
+
+logger = logging.getLogger(__name__)
+
+
+def generate_identifier(signingkey):
+ """Generates encoded version of the public key associated with
+ signingkey.
+
+ Args:
+ signingkey (str): A private key.
+
+ Returns:
+ str: An encoded 'address' associated with the public key.
+ """
+ return pybitcointools.pubtoaddr(pybitcointools.privtopub(signingkey))
+
+
+def generate_signing_key(wifstr=None):
+ """Returns a decoded signing key associated with wifstr or generates
+ a random signing key.
+
+ Args:
+ wifstr (str): A private key in wif format.
+
+ Returns:
+ str: a signing key.
+ """
+ if wifstr:
+ return pybitcointools.decode_privkey(wifstr, 'wif')
+
+ return pybitcointools.random_key()
+
+
+def get_verifying_key(serialized_msg, serialized_sig):
+ """Attempts to recover a public key from a message and a signature.
+
+ Args:
+ serialized_msg (str): A serialized message.
+ serialized_sig (str): A serialized signature.
+
+ Returns:
+ str: a public key.
+ """
+ v, r, s = pybitcointools.decode_sig(serialized_sig)
+ msghash = pybitcointools.electrum_sig_hash(serialized_msg)
+ z = pybitcointools.hash_to_int(msghash)
+ yBit = v - 27
+ try:
+ pubkey = nativeECDSA.recoverPubKeyFromSig(
+ str(z), str(r), str(s), int(yBit))
+ except Exception as ex:
+        logger.warn('Unable to extract public key from signature: %s',
+                    ex.args[0])
+ return ""
+    # strip out hex indicators from opencpp
+    pubkey = pubkey.translate(None, 'h')
+ pubkey = "04" + pubkey # add header to match pybitcointools format
+ return pubkey
+
+
+class SignedObject(object):
+ """Implements a base class for processing & validating signed objects.
+
+ Attributes:
+ Signature (str): The signature used to sign the object.
+ SignatureKey (str): The name of the key related to the signature.
+ Used to build dict return types.
+
+ """
+
+ def __init__(self, minfo={}, signkey='Signature'):
+ """Constructor for the SignedObject class.
+
+ Args:
+ minfo (dict): object data
+ signkey (str): the field name for the signature within the
+ object data
+ """
+ self.Signature = minfo.get(signkey)
+ self.SignatureKey = signkey
+
+        self._identifier = hashlib.sha256(self.Signature).hexdigest() \
+            if self.Signature else None
+ self._originatorid = None
+ self._verifyingkey = None
+
+ self._data = None
+
+ def __repr__(self):
+ if not self._data:
+ self._data = self.serialize()
+ return self._data
+
+ @property
+ def Identifier(self):
+ """Returns a unique identifier for the transaction.
+
+        Note that the signature is really the only unique identifier,
+        but the first 16 characters should be sufficient for testing
+        purposes.
+
+ Returns:
+ str: The first 16 characters of a sha256 hexdigest.
+ """
+ assert self.Signature
+
+ if not self._identifier:
+ self._identifier = hashlib.sha256(self.Signature).hexdigest()
+
+ return self._identifier[:16]
+
+ @property
+ def OriginatorID(self):
+ """Return the address of the object originator based on the
+ verifying key derived from the object's signature.
+
+ Returns:
+ str: The address of the signer of the object.
+ """
+ assert self.Signature
+
+ if not self._verifyingkey:
+ serialized = self.serialize(signable=True)
+ self._verifyingkey = get_verifying_key(serialized, self.Signature)
+ self._originatorid = pybitcointools.pubtoaddr(self._verifyingkey)
+
+ return self._originatorid
+
+ def is_valid(self, store):
+ """Determines if the signature on the object is valid.
+
+ Args:
+ store: Unused argument.
+
+ Returns:
+ bool: True if the signature on the object is valid, False
+ otherwise.
+
+ """
+ return self.verify_signature()
+
+ def verify_signature(self, originatorid=None):
+ """Uses the signature to verify that a message came from an
+ originator.
+
+ Often this is simply used to initialize the originatorid field
+ for the message.
+
+ Args:
+ originatorid (str): The address of the originator of the
+ object.
+
+ Returns:
+ bool: True if the passed in originatorid is equal to the
+ originator of the object OR if the originatorid passed
+ in is None. False otherwise.
+ """
+
+ try:
+ assert self.Signature
+
+ if not self._verifyingkey:
+ serialized = self.serialize(signable=True)
+ self._verifyingkey = get_verifying_key(serialized,
+ self.Signature)
+ self._originatorid = pybitcointools.pubtoaddr(
+ self._verifyingkey)
+
+ return originatorid is None or self._originatorid == originatorid
+
+        except Exception:
+ logger.exception('unable to verify transaction signature')
+ return False
+
+ def sign_from_node(self, node):
+ """Generates the signature from the signing key stored in a node.
+
+ Args:
+ node (Node): The node providing the signing key.
+ """
+ assert node.SigningKey
+ return self.sign_object(node.SigningKey)
+
+ def sign_object(self, signingkey):
+ """Generates a string signature for the object using the signing
+ key.
+
+ Args:
+ signingkey (str): hex encoded private key
+ """
+
+ serialized = self.serialize(signable=True)
+ self.Signature = pybitcointools.ecdsa_sign(serialized, signingkey)
+
+ if not self._verifyingkey:
+ self._verifyingkey = get_verifying_key(serialized, self.Signature)
+ self._originatorid = pybitcointools.pubtoaddr(self._verifyingkey)
+
+ self._identifier = hashlib.sha256(self.Signature).hexdigest()
+
+ def serialize(self, signable=False):
+ """Generates a CBOR serialized dict containing the a SignatureKey
+ to Signature mapping.
+
+ Args:
+ signable (bool): if signable is True, self.SignatureKey is
+ removed from the dict prior to serialization to CBOR.
+
+ Returns:
+ bytes: a CBOR representation of a SignatureKey to Signature
+ mapping.
+ """
+ dump = self.dump()
+
+ if signable and self.SignatureKey in dump:
+ del dump[self.SignatureKey]
+
+ return dict2cbor(dump)
+
+ def dump(self):
+ """Builds a dict containing a mapping of SignatureKey to Signature.
+
+ Returns:
+ dict: a map containing SignatureKey:Signature.
+ """
+ result = {self.SignatureKey: self.Signature}
+ return result
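
A sketch of how a concrete message type might build on SignedObject
(SignedPing and its Payload field are hypothetical, and this assumes the
native ECDSA recovery module is built so key recovery succeeds):
subclasses extend dump() with their own fields so that
serialize(signable=True) covers everything except the signature itself.

    class SignedPing(SignedObject):
        def __init__(self, minfo={}):
            super(SignedPing, self).__init__(minfo)
            self.Payload = minfo.get('Payload', '')

        def dump(self):
            result = super(SignedPing, self).dump()
            result['Payload'] = self.Payload
            return result

    key = generate_signing_key()
    ping = SignedPing({'Payload': 'hello'})
    ping.sign_object(key)

    assert ping.verify_signature()
    # OriginatorID should match the address derived from the signing key
    assert ping.OriginatorID == generate_identifier(key)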
diff --git a/gossip/stats.py b/gossip/stats.py
new file mode 100644
index 0000000000..85d264123b
--- /dev/null
+++ b/gossip/stats.py
@@ -0,0 +1,336 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module defines the Stats class, which manages statistics about the
+gossiper node. Additional supporting classes include: Metric, Value,
+Counter, MapCounter, Average, and Sample.
+"""
+
+import logging
+import time
+
+from gossip import common
+
+logger = logging.getLogger(__name__)
+
+
+class Stats(object):
+ """The Stats class manages a set of Metrics about a Node.
+
+ Attributes:
+ NodeIdentifier (str): The node identifier the statistics
+ are associated with.
+ DomainIdentifier (str): The domain to which the statistics
+ belong. Used for developer categorization purposes when
+ the statistics are logged. Current values in use include
+ 'packet', 'message', 'ledger', etc.
+ Metrics (dict): A map of associated metrics.
+
+ """
+
+ def __init__(self, nodeid, statid):
+ """Constructor for the Stats class.
+
+ Args:
+ nodeid (str): The node identifier the statistics are
+ associated with.
+ statid (str): The domain to which the statistics belong.
+ """
+ self.NodeIdentifier = nodeid
+ self.DomainIdentifier = statid
+ self.Metrics = {}
+
+ def add_metric(self, metric):
+ """Adds a Metric to the Stats object.
+
+ Args:
+ metric (Metric): The metric to add to the Stats object.
+ """
+ self.Metrics[metric.Name] = metric
+
+ def __getattr__(self, attr):
+ if attr in self.Metrics:
+ return self.Metrics[attr]
+
+        raise AttributeError("no metric of type %r" % attr)
+
+ def dump_stats(self, batchid, metrics=[]):
+ """Dumps associated metrics information to the log.
+
+ Args:
+ batchid (str): An identifier for correlating logged stats
+ output with an event.
+            metrics (list of str): The names of the metrics to dump.
+ """
+ if len(metrics) == 0:
+ metrics = self.Metrics.keys()
+
+        metrics = sorted(metrics)
+
+ identifier = "{0}, {1:0.2f}, {2}, {3}".format(self.NodeIdentifier,
+ time.time(), batchid[:8],
+ self.DomainIdentifier)
+ for metric in metrics:
+ if metric in self.Metrics:
+ self.Metrics[metric].dump_metric(identifier)
+
+ def reset_stats(self, metrics=[]):
+ """Resets the specified metrics.
+
+ If no metrics are provided, all metrics are reset.
+
+ Args:
+            metrics (list of str): The names of the metrics to reset.
+ """
+ if len(metrics) == 0:
+ metrics = self.Metrics.keys()
+
+ logger.info('metric, %s, %0.2f, %s, %s', self.NodeIdentifier,
+ time.time(), common.NullIdentifier[:8], 'reset')
+ for metric in metrics:
+ if metric in self.Metrics:
+ self.Metrics[metric].reset()
+
+
+class Metric(object):
+ """The Metric class acts as a base class for a number of specific
+ Metric types, including Value, Counter, MapCounter, Average, and
+ Sample.
+
+ Attributes:
+ Name (str): the name of the metric.
+
+ """
+
+ def __init__(self, name):
+ """Constructor for the Metric class.
+
+ Args:
+ name (str): the name of the metric.
+ """
+ self.Name = name
+
+ def dump(self, *args):
+ """Writes the provided args to a logger entry.
+
+ Args:
+ args (list): a list of arguments to append to the logger
+ entry.
+ """
+ logger.info("metric, %s", ", ".join(map(lambda x: str(x), args)))
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry containing the provided identifier and
+ the metric name.
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+ self.dump(identifier, self.Name)
+
+ def reset(self):
+ """Base class reset of associated measure.
+
+ Since the base Metric class doesn't track a measure, no action
+ is taken.
+ """
+ pass
+
+
+class Value(Metric):
+ """The Value class extends Metric to track a single associated
+ value.
+
+ Attributes:
+ Value: The value to track.
+ """
+
+ def __init__(self, name, value):
+ """Constructor for the Value class.
+
+ Args:
+ name (str): The name of the metric.
+ value: The value to track.
+ """
+ super(Value, self).__init__(name)
+ self.Value = value
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry containing the provided identifier,
+ the metric name, and the metric value.
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+ self.dump(identifier, self.Name, self.Value)
+
+
+class Counter(Metric):
+ """The Counter class extends Metric to track a counter value.
+
+ Attributes:
+ Value (int): The counter value.
+ """
+
+ def __init__(self, name):
+ """Constructor for the Counter class.
+
+ Args:
+ name (str): The name of the metric.
+ """
+ super(Counter, self).__init__(name)
+ self.reset()
+
+ def increment(self, value=1):
+ """Adds to the metric's current value.
+
+ Args:
+ value (int): the amount to add to the metric's value.
+ Defaults to 1.
+ """
+ self.Value += int(value)
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry containing the provided identifier,
+ the metric name, and the metric value.
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+ self.dump(identifier, self.Name, self.Value)
+
+ def reset(self):
+ """Resets the value of the metric to zero.
+ """
+ self.Value = 0
+
+
+class MapCounter(Metric):
+ """The MapCounter class extends Metric to track a set of key/value
+ counters.
+
+ Attributes:
+ Values (dict): A map of named counter values.
+ """
+
+ def __init__(self, name):
+ """Constructor for the MapCounter class.
+
+ Args:
+ name (str): The name of the metric.
+ """
+ super(MapCounter, self).__init__(name)
+ self.reset()
+
+ def increment(self, key, value=1):
+ """Adds to the value of 'key' within the metric.
+
+ Args:
+ key (str): The key whose value will be created or incremented.
+ value (int): the amount to add to the key's value. Defaults to
+ 1.
+ """
+ if key not in self.Values:
+ self.Values[key] = 0
+ self.Values[key] += int(value)
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry for each key in the map containing the
+ provided identifier, the key and the metric value.
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+ for key, val in self.Values.iteritems():
+ self.dump(identifier, key, val)
+
+ def reset(self):
+ """Resets the contents of the Values dict.
+ """
+ self.Values = {}
+
+
+class Average(Metric):
+ """The Average class extends Metric to track an averaged value.
+
+ Attributes:
+ Total (int): The total incremented value of the measure.
+ Count (int): The number of times that Total has been
+ incremented.
+ """
+
+ def __init__(self, name):
+ """Constructor for the Average class.
+
+ Args:
+ name (str): The name of the metric.
+ """
+ super(Average, self).__init__(name)
+ self.reset()
+
+ def add_value(self, value):
+ """Adds to the total value and increments the counter.
+
+ Args:
+ value (int): The amount to add to the total value.
+ """
+ self.Total += value
+ self.Count += 1
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry containing the provided identifier,
+ the name of the metric, the total value, and the counter.
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+
+ self.dump(identifier, self.Name, self.Total, self.Count)
+
+ def reset(self):
+ """Resets the total value and the counter to zero.
+ """
+ self.Total = 0
+ self.Count = 0
+
+
+class Sample(Metric):
+ """The Sample class extends Metric to capture the output of a
+ provided closure when dump_metric() is called.
+
+ Attributes:
+ Closure (function): The function to be called when dump_metric()
+ is called.
+ """
+
+ def __init__(self, name, closure):
+ """Constructor for the Sample class.
+
+ Args:
+ name (str): The name of the metric.
+ closure (function): The function to be called when dump_metric()
+ is called.
+ """
+ super(Sample, self).__init__(name)
+ self.Closure = closure
+
+ def dump_metric(self, identifier):
+ """Writes a logger entry containing the provided identifier, the
+        name of the metric, and the return value of Closure().
+
+ Args:
+ identifier (str): The identifier to log.
+ """
+ self.dump(identifier, self.Name, self.Closure())
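
Typical usage wires one Stats object per peer or domain, registers the
metric flavors it needs, and periodically dumps them. A brief sketch
using the classes above (the names 'node-01', 'packet', and the metrics
are illustrative):

    peer_stats = Stats('node-01', 'packet')
    peer_stats.add_metric(Counter('MessagesSent'))
    peer_stats.add_metric(Average('BytesPerMessage'))
    peer_stats.add_metric(Value('Address', '10.0.0.1:5500'))

    # metrics are reachable by name through __getattr__
    peer_stats.MessagesSent.increment()
    peer_stats.BytesPerMessage.add_value(512)

    # emits one 'metric, ...' log line per metric; only the first 8
    # characters of the batch identifier are logged
    peer_stats.dump_stats('0123456789abcdef')
    peer_stats.reset_stats()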
diff --git a/gossip/token_bucket.py b/gossip/token_bucket.py
new file mode 100644
index 0000000000..f764c79ece
--- /dev/null
+++ b/gossip/token_bucket.py
@@ -0,0 +1,90 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+"""
+This module implements the TokenBucket class for managing the average
+rate of data transmission between nodes.
+"""
+
+import logging
+
+import time
+
+logger = logging.getLogger(__name__)
+
+
+class TokenBucket(object):
+ """The TokenBucket class allows for traffic shaping via an average
+ transmission rate (the drip rate) and a limit to 'burstiness' (via
+ the bucket capacity).
+
+ Attributes:
+ DefaultDripRate (int): The default number of tokens which are
+ added to the bucket per second.
+ DefaultCapacity (int): The default maximum number of tokens
+ which can fit in the bucket.
+ DripRate (int): The configured number of tokens added to the
+ bucket per second.
+ Capacity (int): The configured maximum number of tokens which
+ can fit in the bucket.
+        LastDrip (float): The time of the last drip, in seconds since
+            the epoch.
+ Tokens (int): The number of tokens in the bucket.
+
+ """
+ DefaultDripRate = 32000
+ DefaultCapacity = DefaultDripRate * 2
+
+ def __init__(self, rate=None, capacity=None):
+ """Constructor for the TokenBucket class.
+
+ Args:
+ rate (int): the drip rate for the newly created bucket in
+ tokens per second.
+ capacity (int): the maximum number of tokens the newly
+ created bucket can hold.
+
+ """
+ self.DripRate = rate or self.DefaultDripRate
+ self.Capacity = capacity or self.DefaultCapacity
+
+ self.LastDrip = time.time()
+ self.Tokens = 0
+
+ def drip(self):
+ """Adds tokens to the bucket based on the configured drip rate
+ per second, up to the capacity of the bucket.
+ """
+ now = time.time()
+ self.Tokens = min(self.Capacity,
+ self.Tokens + int(self.DripRate *
+ (now - self.LastDrip)))
+ self.LastDrip = now
+
+ def consume(self, amount):
+ """Consumes tokens from the bucket.
+
+ Args:
+ amount (int): the number of tokens to consume from the bucket.
+
+ Returns:
+            bool: If more tokens are requested than are available, returns
+                False; otherwise subtracts the tokens and returns True.
+
+ """
+ self.drip()
+
+ if amount > self.Tokens:
+ return False
+ self.Tokens -= amount
+ return True
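
A short sketch of the shaping behavior (the rate and capacity values are
illustrative): a fresh bucket starts empty, so a large send is deferred
until enough tokens have dripped in.

    import time

    bucket = TokenBucket(rate=1000, capacity=2000)

    assert not bucket.consume(1500)   # the bucket starts empty

    time.sleep(2)                     # ~2000 tokens drip in (capped at capacity)
    assert bucket.consume(1500)       # now succeeds, leaving ~500 tokens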
diff --git a/gossip/topology/__init__.py b/gossip/topology/__init__.py
new file mode 100644
index 0000000000..201bce92d6
--- /dev/null
+++ b/gossip/topology/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['barabasi_albert', 'random_walk']
diff --git a/gossip/topology/barabasi_albert.py b/gossip/topology/barabasi_albert.py
new file mode 100644
index 0000000000..22ae9f297a
--- /dev/null
+++ b/gossip/topology/barabasi_albert.py
@@ -0,0 +1,106 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import random
+
+from gossip.messages import connect_message, topology_message
+
+logger = logging.getLogger(__name__)
+
+MaximumConnectivity = 15
+MinimumConnectivity = 1
+ConnectivityFudgeFactor = 1
+
+
+def start_topology_update(gossiper, oncomplete):
+ """Initiates a Barabasi-Albert topology update.
+
+ Args:
+ gossiper (Node): The local node.
+        oncomplete (function): The function to call once the topology
+ update has completed.
+ """
+ logger.info("initiate topology probe")
+ topology_message.initiate_topology_probe(
+ gossiper, lambda g, m: update_connections(g, m, oncomplete))
+
+
+def update_connections(gossiper, topology, oncomplete):
+ """Connects the node to the network by building a Barabasi-Albert graph.
+
+ Note:
+ For more information see
+ http://en.wikipedia.org/wiki/Barab%C3%A1si%E2%80%93Albert_model
+
+ Args:
+ gossiper (Node): The local node.
+ topology (dict): Map of nodes to connections.
+ oncomplete (function): The function to call once the topology
+ update has completed.
+ """
+ logger.info("update connections from topology probe")
+
+ for peer, connections in topology.iteritems():
+ logger.debug("node %s --> %s", peer.Name, len(connections))
+
+    # First pass through the topology information that was collected:
+    # compute the total number of connections per node, which gives us the
+    # degree distribution needed for Barabasi-Albert preferential attachment
+ total = 0
+ candidates = {}
+ for peer, connections in topology.iteritems():
+ if peer.Identifier == gossiper.LocalNode.Identifier:
+ continue
+
+ if peer.Identifier in gossiper.NodeMap:
+ continue
+
+        # this is strictly NOT part of the Barabasi graph construction
+        # because it forces a limit on connectivity; however, it removes
+        # some of the worst hotspots without fundamentally changing the
+        # graph structure
+ count = len(connections)
+ if count > MaximumConnectivity:
+ continue
+
+ candidates[peer] = count
+ total += count
+
+ # Second pass selects some subset of nodes based on the number of existing
+ # connections and sends out a connection request to each
+ if total > 0:
+ for peer, count in candidates.iteritems():
+
+            # the FudgeFactor is used to increase the chance that we'll
+            # connect to a node; strictly speaking, the fudge factor
+            # should be 0
+ if random.randint(0, total - 1) < count + ConnectivityFudgeFactor:
+ _sendconnectionrequest(gossiper, peer)
+
+ # call the final handler
+ oncomplete(gossiper)
+
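The selection step above gives each candidate a chance of roughly
(count + fudge) / total, i.e. preferential attachment toward
well-connected peers. A standalone sketch of that step with hypothetical
connection counts:

    import random

    FUDGE = 1                                              # ConnectivityFudgeFactor
    candidates = {'peer-a': 2, 'peer-b': 7, 'peer-c': 4}   # hypothetical counts
    total = sum(candidates.values())

    selected = [peer for peer, count in candidates.items()
                if random.randint(0, total - 1) < count + FUDGE]
    # P(select peer) ~= (count + FUDGE) / total, so 'peer-b' is the
    # most likely to receive a connection request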
+
+def _sendconnectionrequest(gossiper, peer):
+ logger.info("add node %s, %s, %s", peer, peer.Identifier[:8],
+ peer.NetAddress)
+
+ gossiper.add_node(peer)
+
+ request = connect_message.ConnectRequestMessage()
+ request.NetHost = gossiper.LocalNode.NetHost
+ request.NetPort = gossiper.LocalNode.NetPort
+ request.Name = gossiper.LocalNode.Name
+
+ gossiper.send_message(request, peer.Identifier)
diff --git a/gossip/topology/random_walk.py b/gossip/topology/random_walk.py
new file mode 100644
index 0000000000..0c4e499594
--- /dev/null
+++ b/gossip/topology/random_walk.py
@@ -0,0 +1,51 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from twisted.internet import reactor
+
+from gossip.messages import random_walk_message
+
+logger = logging.getLogger(__name__)
+
+TimeBetweenProbes = 1.0
+TargetConnectivity = 3
+
+
+def start_topology_update(gossiper, callback):
+ """Initiates a random walk topology update.
+
+ Args:
+ gossiper (Node): The local node.
+ callback (function): The function to call once the random walk
+ topology update has completed.
+ """
+ logger.info("initiate random walk topology update")
+
+ count = max(0, TargetConnectivity - len(gossiper.peer_list()))
+ logger.debug('adding %d connections through random walk', count)
+
+ _sendrandomwalk(gossiper, callback, count)
+
+
+def _sendrandomwalk(gossiper, callback, count):
+ if count <= 0:
+ callback()
+ return
+
+ random_walk_message.send_random_walk_message(gossiper)
+ reactor.callLater(TimeBetweenProbes, _sendrandomwalk, gossiper, callback,
+ count - 1)
diff --git a/journal/__init__.py b/journal/__init__.py
new file mode 100644
index 0000000000..5e0dfae4e2
--- /dev/null
+++ b/journal/__init__.py
@@ -0,0 +1,20 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = [
+ 'global_store_manager',
+ 'journal_core',
+ 'transaction',
+ 'transaction_block']
diff --git a/journal/consensus/__init__.py b/journal/consensus/__init__.py
new file mode 100644
index 0000000000..8dc77c4edc
--- /dev/null
+++ b/journal/consensus/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = []
diff --git a/journal/consensus/poet/__init__.py b/journal/consensus/poet/__init__.py
new file mode 100644
index 0000000000..3ad4d8d763
--- /dev/null
+++ b/journal/consensus/poet/__init__.py
@@ -0,0 +1,15 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+__all__ = ['poet_journal', 'poet_transaction_block', 'wait_certificate']
diff --git a/journal/consensus/poet/poet_enclave_simulator/__init__.py b/journal/consensus/poet/poet_enclave_simulator/__init__.py
new file mode 100644
index 0000000000..0e24362ab1
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/__init__.py
@@ -0,0 +1,14 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
diff --git a/journal/consensus/poet/poet_enclave_simulator/common.cpp b/journal/consensus/poet/poet_enclave_simulator/common.cpp
new file mode 100644
index 0000000000..27a61f2a83
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/common.cpp
@@ -0,0 +1,240 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+
+/*
+*
+* Emulates secure enclave for POET implementation.
+*/
+
+#ifdef _WIN32
+    #include <windows.h>
+#else
+    #include <sys/time.h>
+#endif
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "poet_enclave.h"
+#include "common.h"
+
+using namespace std;
+
+
+const string PassPhrase("4 score year ago our founding fathers got really crazy and declared fridays as beer days");
+CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey GlobalPrivateKey;
+CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey GlobalPublicKey;
+
+CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey WaitTimerPrivateKey;
+CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey WaitTimerPublicKey;
+
+CryptoPP::AutoSeededRandomPool prng;
+
+#if KEYDEBUG
+static void PrintPrivateKey( const CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey& key )
+{
+ std::cout << std::endl;
+ std::cout << "Private Exponent:" << std::endl;
+ std::cout << " " << key.GetPrivateExponent() << std::endl;
+}
+
+static void SavePrivateKey( const string& filename, const CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey& key )
+{
+ key.DEREncodePrivateKey(CryptoPP::FileSink(filename.c_str(),true).Ref());
+}
+
+static void PrintPublicKey( const CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey& key )
+{
+ std::cout << std::endl;
+ std::cout << "Public Element:" << std::endl;
+ std::cout << " X: " << key.GetPublicElement().x << std::endl;
+ std::cout << " Y: " << key.GetPublicElement().y << std::endl;
+}
+
+static void SavePublicKey( const string& filename, const CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey& key )
+{
+ key.DEREncodePublicKey(CryptoPP::FileSink(filename.c_str(),true).Ref());
+}
+#endif
+
+void GenerateGlobalKey(void)
+{
+ unsigned char digest[CryptoPP::SHA256::DIGESTSIZE];
+
+ CryptoPP::SHA256().CalculateDigest(digest, (const byte *)PassPhrase.data(), PassPhrase.size());
+ CryptoPP::Integer y(digest, CryptoPP::SHA256::DIGESTSIZE);
+
+ GlobalPrivateKey.Initialize(CryptoPP::ASN1::secp256k1(), y);
+ GlobalPrivateKey.MakePublicKey(GlobalPublicKey);
+}
+
+void GenerateWaitTimerKey(void)
+{
+ WaitTimerPrivateKey.Initialize(prng, CryptoPP::ASN1::secp256k1());
+ WaitTimerPrivateKey.MakePublicKey(WaitTimerPublicKey);
+}
+
+string SignMessage(CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey privkey, string message)
+{
+    CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::Signer signer(privkey);
+
+ // Compute the digest of the message
+ unsigned char digest[CryptoPP::SHA256::DIGESTSIZE];
+ CryptoPP::SHA256().CalculateDigest(digest, (const byte *)message.data(), message.size());
+
+ // Sign, and trim signature to actual size
+ size_t siglen = signer.MaxSignatureLength();
+ string signature(siglen, 0x00);
+
+ siglen = signer.SignMessage(prng, (const byte *)digest, CryptoPP::SHA256::DIGESTSIZE, (byte *)signature.data());
+ signature.resize(siglen);
+
+#if ENCODESIGNATURE
+ return B32Encode(signature);
+#else
+ return signature;
+#endif
+}
+
+bool verify_signature(CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey pubkey, string message, string signature)
+{
+    CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::Verifier verifier(pubkey);
+
+ // Compute the digest of the message
+ unsigned char digest[CryptoPP::SHA256::DIGESTSIZE];
+ CryptoPP::SHA256().CalculateDigest(digest, (const byte *)message.data(), message.size());
+
+#if ENCODESIGNATURE
+ string decoded = B32Decode(signature);
+ return verifier.VerifyMessage((const byte *)digest, CryptoPP::SHA256::DIGESTSIZE, (const byte *)decoded.data(), decoded.size());
+#else
+ // This is the unencoded version
+ return verifier.VerifyMessage((const byte *)digest, CryptoPP::SHA256::DIGESTSIZE, (const byte *)signature.data(), signature.size());
+#endif
+}
+
+double CurrentTime(void)
+{
+#if _WIN32
+ SYSTEMTIME st_epoc;
+ FILETIME ft_epoc;
+ ULARGE_INTEGER epoc;
+ SYSTEMTIME st_now;
+ FILETIME ft_now;
+ ULARGE_INTEGER now;
+ ULARGE_INTEGER now_since_epoc;
+ long now_seconds;
+
+ st_epoc.wYear = 1970;
+ st_epoc.wMonth = 1;
+ st_epoc.wDay = 1;
+ st_epoc.wDayOfWeek = 4;
+ st_epoc.wHour = 0;
+ st_epoc.wMinute = 0;
+ st_epoc.wSecond = 0;
+ st_epoc.wMilliseconds = 0;
+
+ SystemTimeToFileTime(&st_epoc, &ft_epoc);
+ epoc.LowPart = ft_epoc.dwLowDateTime;
+ epoc.HighPart = ft_epoc.dwHighDateTime;
+
+ GetSystemTime(&st_now);
+ SystemTimeToFileTime(&st_now, &ft_now);
+ now.LowPart = ft_now.dwLowDateTime;
+ now.HighPart = ft_now.dwHighDateTime;
+
+ now_since_epoc.QuadPart = now.QuadPart - epoc.QuadPart;
+
+ now_seconds = (long) (now_since_epoc.QuadPart / 10000000L);
+ return now_seconds + st_now.wMilliseconds / 1000.0;
+#else
+ struct timeval now;
+ gettimeofday(&now, NULL);
+
+ return now.tv_sec + now.tv_usec / 1000000.0;
+#endif
+}
+
+string CreateIdentifier(string signature)
+{
+ // Compute the digest of the message
+ unsigned char digest[CryptoPP::SHA256::DIGESTSIZE];
+ CryptoPP::SHA256().CalculateDigest(digest, (const byte *)signature.data(), signature.size());
+
+ CryptoPP::Base32Encoder encoder(NULL, false);
+ encoder.Put((byte *)digest, CryptoPP::SHA256::DIGESTSIZE);
+ encoder.MessageEnd();
+
+ string encoded;
+ encoded.resize(encoder.MaxRetrievable());
+ encoder.Get((byte *)encoded.data(), encoded.size());
+
+ return encoded.substr(0,16);
+}
+
+string B32Encode(string message)
+{
+ CryptoPP::Base32Encoder encoder(NULL, false);
+ encoder.Put((byte *)message.data(), message.size());
+ encoder.MessageEnd();
+
+ string encoded;
+ encoded.resize(encoder.MaxRetrievable());
+ encoder.Get((byte *)encoded.data(), encoded.size());
+
+ return encoded;
+}
+
+string B32Decode(string encoded)
+{
+ CryptoPP::Base32Decoder decoder;
+ decoder.Put((byte *)encoded.data(), encoded.size());
+ decoder.MessageEnd();
+
+ string decoded;
+ decoded.resize(decoder.MaxRetrievable());
+ decoder.Get((byte *)decoded.data(), decoded.size());
+
+ return decoded;
+}
+
+string TestSignMessage(string message)
+{
+ return SignMessage(GlobalPrivateKey, message);
+}
+
+bool TestVerifySignature(string message, string signature)
+{
+ return verify_signature(GlobalPublicKey, message, signature);
+}
+
+void InitializePoetEnclaveModule(void)
+{
+ GenerateGlobalKey();
+ GenerateWaitTimerKey();
+}
diff --git a/journal/consensus/poet/poet_enclave_simulator/common.h b/journal/consensus/poet/poet_enclave_simulator/common.h
new file mode 100644
index 0000000000..b98461d3dd
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/common.h
@@ -0,0 +1,45 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+
+#include
+#include
+
+#include
+#include
+#include
+
+#define ENCODESIGNATURE 1
+
+using namespace std;
+
+extern CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey GlobalPrivateKey;
+extern CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey GlobalPublicKey;
+
+extern CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey WaitTimerPrivateKey;
+extern CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey WaitTimerPublicKey;
+
+void GenerateGlobalKey(void);
+void GenerateWaitTimerKey(void);
+
+string SignMessage(CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PrivateKey privkey, string message);
+bool verify_signature(CryptoPP::ECDSA<CryptoPP::ECP, CryptoPP::SHA256>::PublicKey pubkey, string message, string signature);
+
+double CurrentTime(void);
+
+string CreateIdentifier(string signature);
+string B32Encode(string message);
+string B32Decode(string encoded);
+
diff --git a/journal/consensus/poet/poet_enclave_simulator/poet_enclave.h b/journal/consensus/poet/poet_enclave_simulator/poet_enclave.h
new file mode 100644
index 0000000000..75f3af952d
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/poet_enclave.h
@@ -0,0 +1,80 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// -----------------------------------------------------------------------------
+
+/*
+*
+* Emulates secure enclave for POET implementation.
+*/
+
+#include <string>
+
+// This is the identifier for the genesis block
+const std::string NullIdentifier = "0000000000000000";
+
+class WaitTimer
+{
+ public:
+ WaitTimer(std::string pcertid, double localmean);
+ WaitTimer(std::string serializedtimer, std::string signature = "");
+
+ bool is_expired(void);
+
+ std::string serialize(void);
+ bool Deserialize(std::string encoded);
+
+ double MinimumWaitTime;
+ double LocalMean;
+
+ double RequestTime;
+ double Duration;
+
+ std::string PreviousCertID;
+
+ std::string Signature;
+};
+
+WaitTimer *create_wait_timer(std::string prevcertid, double localmean);
+WaitTimer *DeserializeWaitTimer(std::string serializedtimer, std::string signature = "");
+
+class WaitCertificate
+{
+ public:
+ WaitCertificate(WaitTimer *timer);
+ WaitCertificate(std::string serializedCert, std::string signature = "");
+
+ std::string Identifier(void);
+
+ std::string serialize(void);
+ bool Deserialize(std::string encoded);
+
+ double MinimumWaitTime;
+ double LocalMean;
+
+ double RequestTime;
+ double Duration;
+
+    std::string PreviousCertID;
+
+ std::string Signature;
+};
+
+WaitCertificate *create_wait_certificate(WaitTimer *timer);
+WaitCertificate *deserialize_wait_certificate(std::string serializedcert, std::string signature = "");
+bool VerifyWaitCertificate(WaitCertificate *cert);
+
+std::string TestSignMessage(std::string message);
+bool TestVerifySignature(std::string message, std::string signature);
+
+void InitializePoetEnclaveModule(void);
diff --git a/journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.i b/journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.i
new file mode 100644
index 0000000000..7bfdc150a9
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.i
@@ -0,0 +1,13 @@
+%module poet_enclave_simulator
+
+%include <std_string.i>
+
+%{
+#include "poet_enclave.h"
+%}
+
+%include "poet_enclave.h"
+
+%init %{
+ InitializePoetEnclaveModule();
+%}
diff --git a/journal/consensus/poet/poet_enclave_simulator/wait_certificate.cpp b/journal/consensus/poet/poet_enclave_simulator/wait_certificate.cpp
new file mode 100644
index 0000000000..e28082b758
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/wait_certificate.cpp
@@ -0,0 +1,126 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+#ifdef _WIN32
+ #include
+ #include
+#else
+ #include
+#endif
+#include
+#include
+#include
+#include
+
+#include "poet_enclave.h"
+#include "common.h"
+
+using namespace std;
+
+WaitCertificate::WaitCertificate(string encoded, string signature)
+{
+ Signature = signature;
+ MinimumWaitTime = 1.0;
+ Deserialize(encoded);
+}
+
+WaitCertificate::WaitCertificate(WaitTimer *timer)
+{
+ MinimumWaitTime = timer->MinimumWaitTime;
+ RequestTime = timer->RequestTime;
+ Duration = timer->Duration;
+ LocalMean = timer->LocalMean;
+ PreviousCertID = timer->PreviousCertID;
+ Signature = "";
+}
+
+string WaitCertificate::Identifier(void)
+{
+ if (Signature == "")
+ return NullIdentifier;
+
+ return CreateIdentifier(Signature);
+}
+
+bool WaitCertificate::Deserialize(string serialized)
+{
+ json_object *jobj = json_tokener_parse((char *)serialized.data());
+ if (jobj == NULL)
+ return false;
+
+ struct json_object *obj = NULL;
+
+ // Use alphabetical order for the keys
+ if (json_object_object_get_ex(jobj, "Duration", &obj))
+ Duration = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "LocalMean", &obj))
+ LocalMean = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "MinimumWaitTime", &obj))
+ MinimumWaitTime = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "PreviousCertID", &obj))
+ PreviousCertID = json_object_get_string(obj);
+
+ if (json_object_object_get_ex(jobj, "RequestTime", &obj))
+ RequestTime = json_object_get_double(obj);
+
+ return true;
+}
+
+string WaitCertificate::serialize()
+{
+ json_object *jobj = json_object_new_object();
+
+ // Use alphabetical order for the keys
+ json_object_object_add(jobj, "Duration", json_object_new_double(Duration));
+ json_object_object_add(jobj, "LocalMean", json_object_new_double(LocalMean));
+ json_object_object_add(jobj, "MinimumWaitTime", json_object_new_double(MinimumWaitTime));
+ json_object_object_add(jobj, "PreviousCertID", json_object_new_string((char *)PreviousCertID.data()));
+ json_object_object_add(jobj, "RequestTime", json_object_new_double(RequestTime));
+
+ string serialized = (char *)json_object_to_json_string(jobj);
+ return serialized;
+}
+
+WaitCertificate *create_wait_certificate(WaitTimer *timer)
+{
+ // the timer must have been created by the enclave
+ string serialized_timer = timer->serialize();
+ if (! verify_signature(WaitTimerPublicKey, serialized_timer, timer->Signature))
+ return NULL;
+
+ // and the timer must have expired or the previous cert must be the nullidentifier
+ if (timer->PreviousCertID != NullIdentifier && (! timer->is_expired()))
+ return NULL;
+
+ WaitCertificate *cert = new WaitCertificate(timer);
+ cert->Signature = SignMessage(GlobalPrivateKey, cert->serialize());
+
+ return cert;
+}
+
+WaitCertificate *deserialize_wait_certificate(string serializedcert, string signature)
+{
+ WaitCertificate *cert = new WaitCertificate(serializedcert, signature);
+ return cert;
+}
+
+bool VerifyWaitCertificate(WaitCertificate *cert)
+{
+ string serialized = cert->serialize();
+ return verify_signature(GlobalPublicKey, serialized, cert->Signature);
+}
diff --git a/journal/consensus/poet/poet_enclave_simulator/wait_timer.cpp b/journal/consensus/poet/poet_enclave_simulator/wait_timer.cpp
new file mode 100644
index 0000000000..68865699dc
--- /dev/null
+++ b/journal/consensus/poet/poet_enclave_simulator/wait_timer.cpp
@@ -0,0 +1,128 @@
+// Copyright 2016 Intel Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+// ------------------------------------------------------------------------------
+
+#ifdef _WIN32
+ #include
+ #include
+#else
+ #include
+#endif
+#include
+#include
+#include
+#include
+
+#include "poet_enclave.h"
+#include "common.h"
+
+using namespace std;
+
+static double ComputeDuration(double mean, double minimum)
+{
+#ifdef _WIN32
+ SYSTEMTIME seed;
+ GetLocalTime(&seed);
+ std::tr1::mt19937 generator((int) seed.wMilliseconds);
+    std::tr1::exponential_distribution<double> distribution(1.0 / mean);
+#else
+ struct timeval seed;
+ gettimeofday(&seed, NULL);
+
+ std::default_random_engine generator(seed.tv_usec);
+    std::exponential_distribution<double> distribution(1.0 / mean);
+#endif
+
+ return(minimum + distribution(generator));
+}
+
+WaitTimer::WaitTimer(string encoded, string signature)
+{
+ Signature = signature;
+ MinimumWaitTime = 1.0;
+ Deserialize(encoded);
+}
+
+WaitTimer::WaitTimer(string pcertid, double localmean)
+{
+ MinimumWaitTime = 1.0;
+ LocalMean = localmean;
+
+ RequestTime = CurrentTime();
+ Duration = ComputeDuration(LocalMean, MinimumWaitTime);
+
+ PreviousCertID = pcertid;
+}
+
+bool WaitTimer::is_expired(void)
+{
+ return (RequestTime + Duration) < CurrentTime() ? true : false;
+}
+
+bool WaitTimer::Deserialize(string serialized)
+{
+ json_object *jobj = json_tokener_parse((char *)serialized.data());
+ if (jobj == NULL)
+ return false;
+
+ struct json_object *obj = NULL;
+
+ // Use alphabetical order for the keys
+ if (json_object_object_get_ex(jobj, "Duration", &obj))
+ Duration = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "LocalMean", &obj))
+ LocalMean = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "MinimumWaitTime", &obj))
+ MinimumWaitTime = json_object_get_double(obj);
+
+ if (json_object_object_get_ex(jobj, "PreviousCertID", &obj))
+ PreviousCertID = json_object_get_string(obj);
+
+ if (json_object_object_get_ex(jobj, "RequestTime", &obj))
+ RequestTime = json_object_get_double(obj);
+
+ return true;
+}
+
+string WaitTimer::serialize()
+{
+ json_object *jobj = json_object_new_object();
+
+ // Use alphabetical order for the keys
+ json_object_object_add(jobj, "Duration", json_object_new_double(Duration));
+ json_object_object_add(jobj, "LocalMean", json_object_new_double(LocalMean));
+ json_object_object_add(jobj, "MinimumWaitTime", json_object_new_double(MinimumWaitTime));
+ json_object_object_add(jobj, "PreviousCertID", json_object_new_string((char *)PreviousCertID.data()));
+ json_object_object_add(jobj, "RequestTime", json_object_new_double(RequestTime));
+
+ string serialized = (char *)json_object_to_json_string(jobj);
+
+ return serialized;
+}
+
+WaitTimer *create_wait_timer(string prevcertid, double localmean)
+{
+ WaitTimer *timer = new WaitTimer(prevcertid, localmean);
+ timer->Signature = SignMessage(WaitTimerPrivateKey, timer->serialize());
+
+ return(timer);
+}
+
+WaitTimer *DeserializeWaitTimer(string serializedtimer, string signature)
+{
+ WaitTimer *timer = new WaitTimer(serializedtimer, signature);
+ return timer;
+}
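
For reference, the same exponential sampling ComputeDuration performs,
sketched in Python: random.expovariate(1.0 / mean) has mean `mean`,
matching exponential_distribution(1.0 / mean) above, and the fixed
minimum keeps very short timers from winning trivially.

    import random

    def compute_duration(local_mean, minimum_wait=1.0):
        # the mean of the exponential part is local_mean seconds
        return minimum_wait + random.expovariate(1.0 / local_mean)

    # e.g. with a 30s local mean, durations average roughly 31s
    durations = [compute_duration(30.0) for _ in range(5)]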
diff --git a/journal/consensus/poet/poet_journal.py b/journal/consensus/poet/poet_journal.py
new file mode 100644
index 0000000000..be3ca56966
--- /dev/null
+++ b/journal/consensus/poet/poet_journal.py
@@ -0,0 +1,150 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import collections
+import logging
+
+from journal.consensus.poet import poet_transaction_block
+from gossip import common, stats
+from journal import journal_core
+from journal.consensus.poet.wait_certificate import WaitTimer
+
+logger = logging.getLogger(__name__)
+
+
+class PoetJournal(journal_core.Journal):
+ """Implements a journal based on the proof of elapsed time
+ consensus mechanism.
+
+ Attributes:
+        onHeartbeatTimer (EventHandler): The EventHandler tracking
+ calls to make when the heartbeat timer fires.
+ MaximumBlocksToKeep (int): The maximum number of blocks to
+ keep.
+ """
+ def __init__(self, node, **kwargs):
+ """Constructor for the PoetJournal class.
+
+ Args:
+ node (Node): The local node.
+ """
+ super(PoetJournal, self).__init__(node, **kwargs)
+
+ self.onHeartbeatTimer += self._check_certificate
+
+ # initialize the poet handlers
+ poet_transaction_block.register_message_handlers(self)
+
+ # initialize stats specifically for the block chain journal
+ self.JournalStats.add_metric(stats.Counter('BlocksClaimed'))
+
+ # propagate the maximum blocks to keep
+ self.MaximumBlocksToKeep = WaitTimer.CertificateSampleLength
+
+ def build_transaction_block(self, force=False):
+ """Builds a transaction block that is specific to this particular
+ consensus mechanism, in this case we build a block that contains a
+ wait certificate.
+
+ Args:
+ force (boolean): Whether to force creation of the initial
+ block.
+
+ Returns:
+            PoetTransactionBlock: The constructed block with its wait
+            timer.
+ """
+ logger.debug('attempt to build transaction block extending %s',
+ self.MostRecentCommitedBlockID[:8])
+
+ # Get the list of prepared transactions, if there aren't enough
+ # then just return
+ txnlist = self._preparetransactionlist(
+ self.MaximumTransactionsPerBlock)
+ if len(txnlist) < self.MinimumTransactionsPerBlock and not force:
+            logger.debug('insufficient transactions, no block constructed')
+ return None
+
+ logger.info('build transaction block to extend %s with %s '
+ 'transactions',
+ self.MostRecentCommitedBlockID[:8], len(txnlist))
+
+ # Create a new block from all of our pending transactions
+ nblock = poet_transaction_block.PoetTransactionBlock()
+        nblock.BlockNum = (self.MostRecentCommitedBlock.BlockNum + 1
+                           if self.MostRecentCommitedBlock else 0)
+ nblock.PreviousBlockID = self.MostRecentCommitedBlockID
+ nblock.TransactionIDs = txnlist
+ nblock.create_wait_timer(self._build_certificate_list(nblock))
+
+ # must put a cap on the transactions in the block
+ if len(nblock.TransactionIDs) >= self.MaximumTransactionsPerBlock:
+ nblock.TransactionIDs = \
+ nblock.TransactionIDs[:self.MaximumTransactionsPerBlock]
+
+ logger.debug('created new pending block with timer <%s> and '
+ '%d transactions', nblock.WaitTimer,
+ len(nblock.TransactionIDs))
+
+ # fire the build block event handlers
+ self.onBuildBlock.fire(self, nblock)
+
+ return nblock
+
+ def claim_transaction_block(self, nblock):
+ """Claims the block and transmits a message to the network
+ that the local node won.
+
+ Args:
+ nblock (PoetTransactionBlock): The block to claim.
+ """
+        logger.info('node %s claims block with %d transactions',
+ self.LocalNode.Name, len(nblock.TransactionIDs))
+
+ # Claim the block
+ nblock.create_wait_certificate()
+ nblock.sign_from_node(self.LocalNode)
+ self.JournalStats.BlocksClaimed.increment()
+
+ # Fire the event handler for block claim
+ self.onClaimBlock.fire(self, nblock)
+
+ # And send out the message that we won
+ msg = poet_transaction_block.PoetTransactionBlockMessage()
+ msg.TransactionBlock = nblock
+ msg.SenderID = self.LocalNode.Identifier
+ msg.sign_from_node(self.LocalNode)
+
+ self.PendingTransactionBlock = None
+ self.handle_message(msg)
+
+ def _build_certificate_list(self, block):
+ # for the moment we just dump all of these into one list,
+ # not very efficient but it makes things a lot easier to maintain
+ certs = collections.deque()
+ count = WaitTimer.CertificateSampleLength
+
+ while block.PreviousBlockID != common.NullIdentifier \
+ and len(certs) < count:
+ block = self.BlockStore[block.PreviousBlockID]
+ certs.appendleft(block.WaitCertificate)
+
+        # the walk stops at the virtual root block (NullIdentifier), which
+        # is never dereferenced and so contributes no certificate
+ return list(certs)
+
+ def _check_certificate(self, now):
+ if self.PendingTransactionBlock \
+ and self.PendingTransactionBlock.wait_timer_is_expired(now):
+ self.claim_transaction_block(self.PendingTransactionBlock)
diff --git a/journal/consensus/poet/poet_transaction_block.py b/journal/consensus/poet/poet_transaction_block.py
new file mode 100644
index 0000000000..a0ced0d7d4
--- /dev/null
+++ b/journal/consensus/poet/poet_transaction_block.py
@@ -0,0 +1,170 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from journal import transaction_block
+from journal.messages import transaction_block_message
+from journal.consensus.poet.wait_certificate import WaitCertificate, WaitTimer
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Registers poet transaction block message handlers with
+ the journal.
+
+ Args:
+ journal (PoetJournal): The journal on which to register the
+ message handlers.
+ """
+ journal.register_message_handler(
+ PoetTransactionBlockMessage,
+ transaction_block_message.transaction_block_message_handler)
+
+
+class PoetTransactionBlockMessage(
+ transaction_block_message.TransactionBlockMessage):
+ """Poet transaction block messages represent the message format
+ for exchanging information about poet transaction blocks.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ """
+ MessageType = "/" + __name__ + "/TransactionBlock"
+
+ def __init__(self, minfo={}):
+ super(PoetTransactionBlockMessage, self).__init__(minfo)
+
+ tinfo = minfo.get('TransactionBlock', {})
+ self.TransactionBlock = PoetTransactionBlock(tinfo)
+
+
+class PoetTransactionBlock(transaction_block.TransactionBlock):
+ """A poet transaction block is a set of poet transactions to
+ be applied to a ledger.
+
+ Attributes:
+ TransactionBlockTypeName (str): The name of the
+ transaction block type.
+ MessageType (type): The poet transaction block message
+ class.
+ WaitTimer (WaitTimer): The wait timer for the block.
+ WaitCertificate (WaitCertificate): The wait certificate
+ for the block.
+ """
+ TransactionBlockTypeName = '/Lottery/PoetTransactionBlock'
+ MessageType = PoetTransactionBlockMessage
+
+ def __init__(self, minfo={}):
+ """Constructor for the PoetTransactionBlock class.
+
+ Args:
+ minfo (dict): A dict of values for initializing
+ PoetTransactionBlocks.
+ """
+ super(PoetTransactionBlock, self).__init__(minfo)
+
+ self.WaitTimer = None
+ self.WaitCertificate = None
+
+ if 'WaitCertificate' in minfo:
+ wc = minfo.get('WaitCertificate')
+ serialized = wc.get('SerializedCert')
+ signature = wc.get('Signature')
+ self.WaitCertificate = \
+ WaitCertificate.deserialize_wait_certificate(
+ serialized, signature)
+
+ def __str__(self):
+ return "{0}, {1}, {2}, {3:0.2f}, {4}".format(
+ self.BlockNum, self.Identifier[:8], len(self.TransactionIDs),
+ self.CommitTime, self.WaitCertificate)
+
+ def __cmp__(self, other):
+ """
+ Compare two blocks, this will throw an error unless
+ both blocks are valid.
+ """
+ if self.Status != transaction_block.Status.valid:
+ raise ValueError('block {0} must be valid for comparison'.format(
+ self.Identifier))
+
+ if other.Status != transaction_block.Status.valid:
+ raise ValueError('block {0} must be valid for comparison'.format(
+ other.Identifier))
+
+ if self.TransactionDepth < other.TransactionDepth:
+ return -1
+ elif self.TransactionDepth > other.TransactionDepth:
+ return 1
+ else:
+ return cmp(self.Identifier, other.Identifier)
+
+ def is_valid(self, journal):
+ """Verifies that the block received is valid.
+
+        This includes checks for a valid signature and a valid
+        wait certificate.
+
+        Args:
+            journal (PoetJournal): The journal for pulling context.
+ """
+ if not super(PoetTransactionBlock, self).is_valid(journal):
+ return False
+
+ if not self.WaitCertificate:
+ logger.info('not a valid block, no wait certificate')
+ return False
+
+ return self.WaitCertificate.is_valid_wait_certificate(
+ journal._build_certificate_list(self))
+
+ def create_wait_timer(self, certlist):
+ """Creates a wait timer for the journal based on a list
+ of wait certificates.
+
+ Args:
+ certlist (list): A list of wait certificates.
+ """
+ self.WaitTimer = WaitTimer.create_wait_timer(certlist)
+
+ def create_wait_certificate(self):
+ """Create a wait certificate for the journal based on the
+ wait timer.
+ """
+ self.WaitCertificate = WaitCertificate.create_wait_certificate(
+ self.WaitTimer)
+
+ def wait_timer_is_expired(self, now):
+ """Determines if the wait timer is expired.
+
+ Returns:
+ bool: Whether or not the wait timer is expired.
+ """
+ return self.WaitTimer.is_expired(now)
+
+ def dump(self):
+ """Returns a dict with information about the poet transaction
+ block.
+
+ Returns:
+ dict: A dict containing information about the poet
+ transaction block.
+ """
+ result = super(PoetTransactionBlock, self).dump()
+ result['WaitCertificate'] = self.WaitCertificate.dump()
+
+ return result
diff --git a/journal/consensus/poet/wait_certificate.py b/journal/consensus/poet/wait_certificate.py
new file mode 100644
index 0000000000..4838d38a9d
--- /dev/null
+++ b/journal/consensus/poet/wait_certificate.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+import logging
+import importlib
+from journal.consensus.poet.wait_timer import WaitTimer
+
+import journal.consensus.poet.poet_enclave_simulator.poet_enclave_simulator \
+ as PoetEnclaveSimulator
+
+logger = logging.getLogger(__name__)
+
+
+# Needed for tolerant float comparisons; math.isclose is not available in
+# the Python 2 runtime this code targets.
+def is_close(a, b, rel_tol=1e-09, abs_tol=0.0):
+ """Determines whether two floats are within a tolerance.
+
+ Returns:
+ bool: Returns True if the two floats are within a tolerance,
+ False otherwise.
+ """
+ return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
+
+
+class WaitCertificate(object):
+ """Represents wait certificates, which include a random wait timer.
+
+ Attributes:
+ PoetEnclave (module): The PoetEnclave module to use for executing
+ enclave functions.
+ PreviousCertID (str): The id of the previous certificate.
+ LocalMean (float): The local mean wait time based on the history
+ of certs.
+ RequestTime (float): The request time of the certificate.
+ Duration (float): The duration of the wait timer.
+ Signature (str): The signature of the certificate.
+ Identifier (str): The identifier of this certificate.
+ SerializedCert (str): A serialized version of the certificate.
+ """
+ PoetEnclave = PoetEnclaveSimulator
+ try:
+ PoetEnclave = importlib.import_module("poet_enclave.poet_enclave")
+    except ImportError:
+ pass
+
+ @classmethod
+ def create_wait_certificate(cls, timer):
+ """Creates a wait certificate in the enclave and then constructs
+ a WaitCertificate object.
+
+ Args:
+ timer (WaitTimer): The wait timer to use in creating the
+ certificate.
+
+ Returns:
+ WaitCertificate: A new wait certificate.
+ """
+ cert = cls.PoetEnclave.create_wait_certificate(timer.EnclaveWaitTimer)
+ if not cert:
+ logger.warn('invalid timer: %s', timer)
+ raise Exception(
+ 'create_wait_certificate',
+ 'Attempt to create wait certificate from invalid wait timer')
+
+ wc = cls(cert)
+ logger.info('wait certificate created; %s', wc)
+
+ return wc
+
+ @classmethod
+ def deserialize_wait_certificate(cls, serialized, signature):
+ """Converts a serialized wait certificate into an object.
+
+ Args:
+ serialized (str): The serialized wait certificate.
+ signature (str): The signature.
+
+ Returns:
+ WaitCertificate: A wait certificate representing the
+ contents of the serialized wait certificate.
+ """
+ cert = cls.PoetEnclave.deserialize_wait_certificate(
+ serialized, signature)
+ if cert.PreviousCertID != cls.PoetEnclave.NullIdentifier:
+ if not cls.PoetEnclave.VerifyWaitCertificate(cert):
+ raise Exception(
+ 'WaitCertificateVerify',
+ 'Attempt to deserialize an invalid wait certificate')
+
+ return cls(cert)
+
+ def __init__(self, cert):
+ """Initialize the wait certificate.
+
+ Args:
+ cert (poet_enclave.WaitCertificate): The poet enclave
+ generated wait certificate.
+ """
+ self.PreviousCertID = cert.PreviousCertID
+ self.LocalMean = cert.LocalMean
+ self.RequestTime = cert.RequestTime
+ self.Duration = cert.Duration
+ self.Signature = cert.Signature
+ self.Identifier = cert.Identifier()
+
+ # we cannot hold the certificate because it cannot be pickled for
+ # storage in the transaction block array
+ self.SerializedCert = cert.serialize()
+
+ @property
+ def EnclaveWaitCertificate(self):
+ """Returns the enclave version of the wait certificate.
+
+ Returns:
+ poet_enclave.WaitCertificate: Enclave deserialized version
+ of the certificate.
+ """
+ return self.PoetEnclave.deserialize_wait_certificate(
+ self.SerializedCert,
+ self.Signature)
+
+ def is_valid_wait_certificate(self, certs):
+ """Determines whether the wait certificate is valid.
+
+ Args:
+ certs (list): A list of historical certs.
+
+ Returns:
+ bool: Whether or not the wait certificate is valid.
+ """
+ cert = self.EnclaveWaitCertificate
+ expectedmean = WaitTimer.compute_local_mean(certs)
+ if not is_close(cert.LocalMean, expectedmean, abs_tol=0.001):
+ logger.warn('mismatch local mean: %s != %s', cert.LocalMean,
+ expectedmean)
+ # return False
+
+ if cert.PreviousCertID == self.PoetEnclave.NullIdentifier:
+ return True
+
+ if cert.PreviousCertID != certs[-1].Identifier:
+ logger.warn('mismatch previous identifier: %s != %s',
+ cert.PreviousCertID, certs[-1].Identifier)
+ # return False
+
+ return self.PoetEnclave.VerifyWaitCertificate(cert)
+
+ def __str__(self):
+ return "CERT, {0:0.2f}, {1:0.2f}, {2}, {3}".format(
+ self.LocalMean, self.Duration, self.Identifier,
+ self.PreviousCertID)
+
+ def dump(self):
+ """Returns a dict containing information about the wait
+ certificate.
+
+ Returns:
+ dict: A dict containing info about the wait certificate.
+ """
+ result = {
+ 'SerializedCert': self.SerializedCert,
+ 'Signature': self.Signature
+ }
+ return result
diff --git a/journal/consensus/poet/wait_timer.py b/journal/consensus/poet/wait_timer.py
new file mode 100644
index 0000000000..6b7c3a533d
--- /dev/null
+++ b/journal/consensus/poet/wait_timer.py
@@ -0,0 +1,182 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import importlib
+import journal.consensus.poet.poet_enclave_simulator.poet_enclave_simulator \
+ as PoetEnclaveSimulator
+
+logger = logging.getLogger(__name__)
+
+
+class WaitTimer(object):
+ """Wait timers represent a random duration incorporated into a wait
+ certificate.
+
+ Attributes:
+ MinimumWaitTime (float): The minimum wait time in seconds.
+ TargetWaitTime (float): The target wait time in seconds.
+ InitialWaitTime (float): The initial wait time in seconds.
+ CertificateSampleLength (int): The number of certificates to
+ sample for the population estimate.
+        FixedDurationBlocks (int): Until FixedDurationBlocks blocks
+            exist, the local mean is blended from TargetWaitTime and
+            InitialWaitTime rather than estimated from the history.
+ PoetEnclave (module): The PoetEnclave module to use for
+ executing enclave functions.
+ PreviousCertID (str): The id of the previous certificate.
+ LocalMean (float): The local mean wait time based on the history
+ of certs.
+ RequestTime (float): The request time.
+ Duration (float): The duration of the wait timer.
+ Signature (str): The signature of the timer.
+ SerializedTimer (str): A serialized version of the timer.
+
+ """
+ MinimumWaitTime = 1.0
+ TargetWaitTime = 30.0
+ InitialWaitTime = 3000.0
+ CertificateSampleLength = 50
+ FixedDurationBlocks = CertificateSampleLength
+
+ PoetEnclave = PoetEnclaveSimulator
+ try:
+ PoetEnclave = importlib.import_module("poet_enclave.poet_enclave")
+    except ImportError:
+ pass
+
+ @classmethod
+ def create_wait_timer(cls, certs):
+ """Creates a wait timer in the enclave and then constructs
+ a WaitTimer object.
+
+ Args:
+ certs (list): A historical list of certificates.
+
+ Returns:
+ WaitTimer: A new wait timer.
+ """
+ previd = certs[-1].Identifier if certs else \
+ cls.PoetEnclave.NullIdentifier
+ mean = cls.compute_local_mean(certs)
+ timer = cls.PoetEnclave.create_wait_timer(previd, mean)
+
+ wt = cls(timer)
+ logger.info('wait timer created; %s', wt)
+
+ return wt
+
+ @classmethod
+ def compute_local_mean(cls, certs):
+ """Computes the local mean wait time based on the certificate
+ history.
+
+ Args:
+ certs (list): A historical list of certificates.
+
+ Returns:
+ float: The local mean wait time.
+ """
+ count = len(certs)
+ if count < cls.FixedDurationBlocks:
+ ratio = 1.0 * count / cls.FixedDurationBlocks
+ return cls.TargetWaitTime * (
+ 1 - ratio * ratio) + cls.InitialWaitTime * ratio * ratio
+
+ return cls.TargetWaitTime * cls.population_estimate(certs)
+
+ @classmethod
+ def population_estimate(cls, certificates):
+ """Estimates the size of the validator population by computing
+ the average wait time and the average local mean used by
+ the winning validator.
+
+        Since the entire population should be computing from the same
+        local mean based on the history of certificates, and the minimum
+        of N exponentially distributed variables with mean m is itself
+        exponentially distributed with mean m / N, we can estimate the
+        population size from the ratio of the local mean to the average
+        winning wait time. A longer list of certificates provides a
+        better estimate only if the population of validators is
+        relatively stable.
+
+ Note:
+
+ See the section entitled "Distribution of the minimum of
+ exponential random variables" in the page
+ http://en.wikipedia.org/wiki/Exponential_distribution
+
+ Args:
+ certificates (list): Previously committed certificates,
+                ordered newest to oldest.
+
+        Returns:
+            float: The estimated number of validators.
+        """
+        certs = certificates[:cls.CertificateSampleLength]
+
+        summeans = 0
+        sumwaits = 0
+        for cert in certs:
+            sumwaits += cert.Duration - cls.MinimumWaitTime
+            summeans += cert.LocalMean
+
+        # average over the sampled certificates, not the full history, so
+        # the denominators stay consistent with the loop above
+        avgwait = sumwaits / len(certs)
+        avgmean = summeans / len(certs)
+
+ return avgmean / avgwait
+
+ def __init__(self, timer):
+ """Constructor for the WaitTimer class.
+
+        Args:
+            timer (poet_enclave.WaitTimer): The enclave-generated wait
+                timer whose fields populate this object.
+ """
+ self.PreviousCertID = timer.PreviousCertID
+ self.LocalMean = timer.LocalMean
+ self.RequestTime = timer.RequestTime
+ self.Duration = timer.Duration
+ self.Signature = timer.Signature
+ self.SerializedTimer = timer.serialize()
+
+ @property
+ def EnclaveWaitTimer(self):
+ """Converts the serialized timer into an object.
+
+ Returns:
+ poet_enclave.WaitTimer: The deserialized enclave timer
+ object.
+ """
+ return self.PoetEnclave.DeserializeWaitTimer(self.SerializedTimer,
+ self.Signature)
+
+ def __str__(self):
+ return "TIMER, {0:0.2f}, {1:0.2f}, {2}".format(
+ self.LocalMean, self.Duration, self.PreviousCertID)
+
+ def is_expired(self, now):
+ """Determines whether the timer has expired.
+
+ Args:
+ now (float): The current time.
+
+ Returns:
+ bool: True if the timer has expired, false otherwise.
+ """
+ if now < (self.RequestTime + self.Duration):
+ return False
+
+ return self.EnclaveWaitTimer.is_expired()
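+
+
+# Illustrative usage (names as defined above): a consensus journal creates a
+# timer with WaitTimer.create_wait_timer(certs) and then polls
+# timer.is_expired(time.time()) from its heartbeat handler; this is the
+# pattern PoetJournal._check_certificate follows.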
diff --git a/journal/consensus/quorum/__init__.py b/journal/consensus/quorum/__init__.py
new file mode 100644
index 0000000000..01212b9ee3
--- /dev/null
+++ b/journal/consensus/quorum/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['quorum_journal', 'quorum_transaction_block']
diff --git a/journal/consensus/quorum/messages/__init__.py b/journal/consensus/quorum/messages/__init__.py
new file mode 100644
index 0000000000..6b3daa689b
--- /dev/null
+++ b/journal/consensus/quorum/messages/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['quorum_advertisement', 'quorum_debug', 'quorum_ballot']
diff --git a/journal/consensus/quorum/messages/quorum_advertisement.py b/journal/consensus/quorum/messages/quorum_advertisement.py
new file mode 100644
index 0000000000..6b285d0059
--- /dev/null
+++ b/journal/consensus/quorum/messages/quorum_advertisement.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import random
+
+from gossip import message, node
+
+logger = logging.getLogger(__name__)
+
+
+def send_quorum_advertisement_message(journal):
+ """Sends a quorum advertisement message to peers.
+
+ Args:
+ journal (QuorumJournal): The journal on which the quorum will
+ take place.
+ """
+ logger.info('sending quorum advertisement')
+
+ msg = QuorumAdvertisementMessage.create_from_node(journal.LocalNode)
+
+ peers = journal.peer_id_list()
+ if len(peers) > 0:
+ peerid = random.choice(peers)
+ journal.send_message(msg, peerid)
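+
+# Note: the advertisement is sent to a single random peer; wider propagation
+# relies on quorum_advertisement_handler below re-forwarding the message for
+# as long as its TimeToLive (initialized to 8) permits.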
+
+
+def register_message_handlers(journal):
+ """Registers the message handlers which are triggered when
+ quorum advertisement messages arrive.
+
+ Args:
+ journal (QuorumJournal): The journal to register the message
+ handlers against.
+ """
+ journal.register_message_handler(QuorumAdvertisementMessage,
+ quorum_advertisement_handler)
+
+
+class QuorumAdvertisementMessage(message.Message):
+ """Quorum advertisement messages represent the message format
+ for exchanging quorum advertisements.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ Identifier (str): The identifier of the node.
+ NetHost (str): Hostname or IP address identifying the node.
+ NetPort (int): The remote port number to connect to.
+ Name (str): The name of the originator.
+ TimeToLive (int): The number of hops for the message to
+ live.
+ """
+ MessageType = "/" + __name__ + "/Quorum/Advertisement"
+
+ @staticmethod
+ def create_from_node(node):
+ """Creates a QuorumAdvertisementMessage from a node.
+
+ Args:
+ node (Node): The node to create the message from.
+
+ Returns:
+ QuorumAdvertisementMessage: The new message.
+ """
+ msg = QuorumAdvertisementMessage()
+ msg.Identifier = node.Identifier
+ msg.NetHost = node.NetHost
+ msg.NetPort = node.NetPort
+ msg.Name = node.Name
+ return msg
+
+ def __init__(self, minfo={}):
+ """Constructor for the QuorumAdvertisementMessage class.
+
+ Args:
+ minfo (dict): A dict containing initialization values
+ for the new QuorumAdvertisementMessage.
+ """
+ super(QuorumAdvertisementMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.Identifier = minfo.get('Identifier', '')
+ self.NetHost = minfo.get('Host', "127.0.0.1")
+ self.NetPort = minfo.get('Port', 0)
+ self.Name = minfo.get('Name', self.OriginatorID[:8])
+
+ self.TimeToLive = 8
+
+ @property
+ def NetAddress(self):
+ """Returns the host and port of the quorum advertisement
+ message.
+
+ Returns:
+            tuple: A (host, port) pair.
+ """
+ return (self.NetHost, self.NetPort)
+
+ def dump(self):
+ """Dumps a dict containing object attributes.
+
+ Returns:
+ dict: A mapping of object attribute names to values.
+ """
+ result = super(QuorumAdvertisementMessage, self).dump()
+
+ result['Host'] = self.NetHost
+ result['Port'] = self.NetPort
+ result['Name'] = self.Name
+ result['Identifier'] = self.Identifier
+
+ return result
+
+
+def quorum_advertisement_handler(msg, journal):
+ """Function called when the journal receives a
+ QuorumAdvertisementMessage from one of its peers.
+
+ Args:
+ msg (QuorumAdvertisementMessage): The received quorum
+ advertisement message.
+ journal (QuorumJournal): The journal which received the
+ message.
+ """
+ logger.info("quorum advertisement received from %s", msg.OriginatorID[:8])
+
+ if msg.OriginatorID == journal.LocalNode.Identifier:
+ logger.debug(
+ 'node %s received its own quorum advertisement request, ignore',
+ journal.LocalNode)
+ return
+
+ onode = node.Node(address=msg.NetAddress,
+ identifier=msg.Identifier,
+ name=msg.Name)
+ journal.add_quorum_node(onode)
+
+ # if there is still life in the message, then see if we should forward it
+ # to another node
+
+ if msg.TimeToLive > 0:
+ # see if we can find a peer other than the peer who forwarded the
+ # message to us, if not then we'll just drop the request
+
+        peers = journal.peer_id_list()
+        if msg.SenderID in peers:
+            peers.remove(msg.SenderID)
+        if msg.OriginatorID in peers:
+            peers.remove(msg.OriginatorID)
+
+ if len(peers) > 0:
+ peerid = random.choice(peers)
+ journal.send_message(msg, peerid, initialize=False)
diff --git a/journal/consensus/quorum/messages/quorum_ballot.py b/journal/consensus/quorum/messages/quorum_ballot.py
new file mode 100644
index 0000000000..34da9e8266
--- /dev/null
+++ b/journal/consensus/quorum/messages/quorum_ballot.py
@@ -0,0 +1,231 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Registers the message handlers which are triggered when
+ quorum ballot messages arrive.
+
+ Args:
+ journal (QuorumJournal): The journal to register the message
+ handlers against.
+ """
+ journal.register_message_handler(QuorumBallotMessage,
+ quorum_ballot_handler)
+ journal.register_message_handler(QuorumInitiateVoteMessage,
+ quorum_initiate_vote_handler)
+ journal.register_message_handler(QuorumCompleteVoteMessage,
+ quorum_complete_vote_handler)
+
+
+class QuorumBallotMessage(message.Message):
+ """Quorum ballot message represent the message format for
+ exchanging quorum ballots.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is a
+ system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should use
+ reliable delivery.
+ Ballot (int): The ballot number.
+ BlockNumber (int): The block number.
+ TransactionIDs (list): The list of transactions to appear on
+ the ballot.
+ """
+ MessageType = "/" + __name__ + "/Quorum/Ballot"
+
+ def __init__(self, minfo={}):
+ """Constructor for QuorumBallotMessage.
+
+ Args:
+ minfo (dict): A dict containing initial values for the
+ new QuorumBallotMessages.
+ """
+ super(QuorumBallotMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.Ballot = minfo.get('Ballot', 0)
+ self.BlockNumber = minfo.get('BlockNumber', 0)
+
+ self.TransactionIDs = []
+ if 'TransactionIDs' in minfo:
+ for txnid in minfo['TransactionIDs']:
+ self.TransactionIDs.append(str(txnid))
+
+ def dump(self):
+ """Returns a dict containing information about the quorum
+ ballot message.
+
+ Returns:
+ dict: A dict containing information about the quorum
+ ballot message.
+ """
+ result = super(QuorumBallotMessage, self).dump()
+
+ result['Ballot'] = self.Ballot
+ result['BlockNumber'] = self.BlockNumber
+
+ result['TransactionIDs'] = []
+ for txnid in self.TransactionIDs:
+ result['TransactionIDs'].append(str(txnid))
+
+ return result
+
+
+def quorum_ballot_handler(msg, journal):
+ """Function called when the journal receives a
+ QuorumBallotMessage from one of its peers.
+
+ Args:
+ msg (QuorumBallotMessage): The received quorum ballot message.
+ journal (QuorumJournal): The journal which received the message.
+ """
+ logger.info("unhandled quorum ballot message received from %s",
+ journal._id2name(msg.OriginatorID))
+
+
+class QuorumInitiateVoteMessage(message.Message):
+ """Quorum initiate vote messages represent the message format for
+    exchanging vote initiation requests.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is a system
+ message.
+ IsForward (bool): Whether this message is forwarded.
+ IsReliable (bool): Whether or not this message should use
+ reliable delivery.
+ BlockNumber (int): The number of the block.
+ """
+ MessageType = "/" + __name__ + "/Quorum/InitiateVote"
+
+ def __init__(self, minfo={}):
+ """Constructor for QuorumInitiateVoteMessage.
+
+ Args:
+ minfo (dict): A dict containing initial values for
+ the new QuorumInitiateVoteMessage.
+ """
+ super(QuorumInitiateVoteMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.BlockNumber = minfo.get('BlockNumber', 0)
+
+ def dump(self):
+ result = super(QuorumInitiateVoteMessage, self).dump()
+ result['BlockNumber'] = self.BlockNumber
+
+ return result
+
+
+def quorum_initiate_vote_handler(msg, journal):
+ """Function called when the journal receives a
+ QuorumInitiateVoteMessage from one of its peers.
+
+ Args:
+ msg (QuorumInitiateVoteMessage): The received quorum initiate
+ vote message.
+ journal (QuorumJournal): The journal which received the
+ message.
+ """
+ logger.debug("quorum initiation request received from %s",
+ journal._id2name(msg.OriginatorID))
+
+ if journal.handle_vote_initiation(msg.BlockNumber):
+ journal.forward_message(msg,
+ exceptions=[msg.SenderID],
+ initialize=False)
+
+
+class QuorumCompleteVoteMessage(message.Message):
+ """Quorum complete vote messages represent the message format
+ for exchanging information between peers when voting has completed.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ BlockNumber (int): The block number.
+ TransactionIDs (list): The list of transactions which are
+ a part of the vote.
+ """
+ MessageType = "/" + __name__ + "/Quorum/CompleteVote"
+
+ def __init__(self, minfo={}):
+ """Constructor for QuorumCompleteVoteMessage.
+
+ Args:
+ minfo (dict): A dict containing initial values for the
+ new QuorumCompleteVoteMessage.
+ """
+ super(QuorumCompleteVoteMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.BlockNumber = minfo.get('BlockNumber', 0)
+
+ self.TransactionIDs = []
+ if 'TransactionIDs' in minfo:
+ for txnid in minfo['TransactionIDs']:
+ self.TransactionIDs.append(str(txnid))
+
+ def dump(self):
+ """Returns a dict containing information about the quorum
+ complete vote message.
+
+ Returns:
+ dict: A dict containing information about the quorum
+ complete vote message.
+ """
+ result = super(QuorumCompleteVoteMessage, self).dump()
+ result['BlockNumber'] = self.BlockNumber
+
+ result['TransactionIDs'] = []
+ for txnid in self.TransactionIDs:
+ result['TransactionIDs'].append(str(txnid))
+
+ return result
+
+
+def quorum_complete_vote_handler(msg, journal):
+ """Function called when the journal receives a
+ QuorumCompleteVoteMessage from one of its peers.
+ """
+ logger.debug("quorum initiation request received from %s",
+ journal._id2name(msg.OriginatorID))
+
+ if journal.complete_vote(msg.BlockNumber, msg.TransactionIDs):
+ journal.forward_message(msg,
+ exceptions=[msg.SenderID],
+ initialize=False)
diff --git a/journal/consensus/quorum/messages/quorum_debug.py b/journal/consensus/quorum/messages/quorum_debug.py
new file mode 100644
index 0000000000..0e03c239aa
--- /dev/null
+++ b/journal/consensus/quorum/messages/quorum_debug.py
@@ -0,0 +1,78 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import time
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Register the message handlers that every journal should support.
+
+ Args:
+ journal (QuorumJournal): The journal to register the handlers
+ against.
+ """
+ journal.register_message_handler(DumpQuorumMessage, _dumpquorumhandler)
+
+
+def _dumpquorumhandler(msg, journal):
+ logger.info('dumping quorum for %s', journal.LocalNode)
+
+ identifier = "{0}, {1:0.2f}, {2}".format(journal.LocalNode, time.time(),
+ msg.Identifier[:8])
+
+ for node in journal.VotingQuorum.itervalues():
+ logger.info('quorum, %s, %s', identifier, node)
+
+
+class DumpQuorumMessage(message.Message):
+ """Represents the structure of a message to dump quorum information.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ """
+ MessageType = "/" + __name__ + "/Quorum/DumpQuorum"
+
+ def __init__(self, minfo={}):
+ """Constructor for DumpQuorumMessage class.
+
+ Args:
+ minfo (dict): A dict containing initial values for the
+ new DumpQuorumMessage.
+ """
+ super(DumpQuorumMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ def dump(self):
+ """Returns a dict with information about the dump quorum message.
+
+ Returns:
+ dict: A dict with information about the dump quorum
+ message.
+ """
+ result = super(DumpQuorumMessage, self).dump()
+ return result
diff --git a/journal/consensus/quorum/protocols/__init__.py b/journal/consensus/quorum/protocols/__init__.py
new file mode 100644
index 0000000000..522437b1b7
--- /dev/null
+++ b/journal/consensus/quorum/protocols/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['quorum_vote']
diff --git a/journal/consensus/quorum/protocols/quorum_vote.py b/journal/consensus/quorum/protocols/quorum_vote.py
new file mode 100644
index 0000000000..a0527909ca
--- /dev/null
+++ b/journal/consensus/quorum/protocols/quorum_vote.py
@@ -0,0 +1,185 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+from collections import OrderedDict
+
+from journal.consensus.quorum.messages import quorum_ballot
+
+logger = logging.getLogger(__name__)
+
+
+class QuorumBallot(object):
+ """Represents a voting ballot in the quorum consensus mechanism.
+
+ Attributes:
+        Votes (dict): An ordered dict of votes.
+ """
+ def __init__(self):
+ """Constructor for the QuorumBallot class.
+ """
+ self.Votes = OrderedDict()
+
+ def vote(self, validatorID, txnID):
+ """Adds a vote.
+
+ Args:
+ validatorID (str): The id of a remote node.
+ txnID (str): The id of a transaction that is being voted
+ for.
+ """
+ if txnID not in self.Votes:
+ self.Votes[txnID] = set()
+
+ self.Votes[txnID].add(validatorID)
+
+    def count_votes(self, threshold):
+        """Identifies transactions above a voting threshold.
+
+        Args:
+            threshold (float): The minimum vote count a transaction
+                must exceed to win.
+
+        Returns:
+            list: A list of transaction ids that won the vote.
+        """
+        txnlist = []
+        for txnID, votes in self.Votes.iteritems():
+            if len(votes) > threshold:
+ txnlist.append(txnID)
+
+ return txnlist
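+
+    # Worked example (illustrative): close_current_ballot passes
+    # threshold = Threshholds[ballot] * len(VotingQuorum); with a 13-node
+    # quorum and a 0.7 threshold that is 9.1, so a transaction needs at
+    # least 10 distinct validator votes to advance.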
+
+
+class QuorumVote(object):
+ """Represents the voting process in the quorum consensus mechanism.
+
+ Attributes:
+ VotingLedger (QuorumJournal): The ledger on which the voting is
+ taking place.
+ ValidatorID (str): The identifier of the local node.
+ VotingQuorum (list): A list of node identifiers participating in
+ the vote.
+        Threshholds (list): A list of voting thresholds.
+ BlockNumber (int): The block number.
+ Ballot (int): The ballot number.
+ LastBallot (int): The id of the previous ballot.
+ QuorumVote (list): A list of ballots.
+ OldBallotMessageHandler (EventHandler): The EventHandler tracking
+ calls to make when ballot messages are received.
+ OldCompleteMessageHandler (EventHandler): The EventHandler tracking
+ calls to make when quorum complete vote messages are
+ received.
+ """
+ def __init__(self, vledger, blocknum, txnlist):
+ """Construtor for the QuorumVote class.
+
+ Args:
+ vledger (QuorumJournal): The journal on which the voting is
+ taking place.
+ blocknum (int): The block number.
+ txnlist (list): A list of transactions to vote on.
+ """
+ self.VotingLedger = vledger
+ self.ValidatorID = vledger.LocalNode.Identifier
+ self.VotingQuorum = vledger.VotingQuorum
+ self.Threshholds = vledger.VoteThreshholds
+ self.BlockNumber = blocknum
+
+ self.Ballot = 0
+ self.LastBallot = len(self.Threshholds)
+ self.QuorumVote = [QuorumBallot() for x in range(self.LastBallot)]
+
+ for txnid in txnlist:
+ self.QuorumVote[self.Ballot].vote(self.ValidatorID, txnid)
+
+ self.OldBallotMessageHandler = self.VotingLedger.get_message_handler(
+ quorum_ballot.QuorumBallotMessage)
+ self.VotingLedger.register_message_handler(
+ quorum_ballot.QuorumBallotMessage,
+ self.quorum_ballot_handler)
+
+ self.OldCompleteMessageHandler = self.VotingLedger.get_message_handler(
+ quorum_ballot.QuorumCompleteVoteMessage)
+ self.VotingLedger.register_message_handler(
+ quorum_ballot.QuorumCompleteVoteMessage,
+ self.quorum_complete_vote_handler)
+
+ def close_current_ballot(self):
+ """Stops accepting further votes for this ballot and move to
+ the next one.
+
+ If this is the last ballot then close the vote completely.
+ """
+ threshhold = self.Threshholds[self.Ballot] * len(self.VotingQuorum)
+ txnlist = self.QuorumVote[self.Ballot].count_votes(threshhold)
+
+ self.Ballot += 1
+ if self.Ballot == self.LastBallot:
+ self.close_vote(txnlist)
+ return
+
+ # send our vote
+ msg = quorum_ballot.QuorumBallotMessage()
+ msg.Ballot = self.Ballot
+ msg.BlockNumber = self.BlockNumber
+ msg.TransactionIDs = txnlist
+
+ self.VotingLedger.broadcast_message(msg)
+
+ def quorum_ballot_handler(self, msg, vledger):
+ """Function called when the vledger receives a
+ QuorumBallotMessage from one of its peers.
+ """
+ sname = self.VotingLedger._id2name(msg.OriginatorID)
+
+ if msg.OriginatorID not in self.VotingQuorum:
+ logger.debug('received votes from %s, not in our quorum set',
+ sname)
+ return
+
+ if msg.BlockNumber != self.BlockNumber:
+ logger.info('received votes from %s for block %d, expecting %d',
+ sname, msg.BlockNumber, self.BlockNumber)
+ return
+
+ if msg.Ballot < self.Ballot or self.LastBallot <= msg.Ballot:
+ logger.info(
+ 'received votes from %s for ballot %d, currently '
+ 'processing %d',
+ sname, msg.Ballot, self.Ballot)
+ return
+
+        logger.debug('add votes from %s to ballot %d', sname, msg.Ballot)
+ for txnid in msg.TransactionIDs:
+ self.QuorumVote[msg.Ballot].vote(msg.OriginatorID, txnid)
+
+ def quorum_complete_vote_handler(self, msg, vledger):
+ pass
+
+ def close_vote(self, txnlist):
+ """The last ballot has been closed so all voting is complete.
+
+        The only transactions left should be those that met the largest
+        threshold in the Threshholds array.
+        """
+ # don't process any more vote messages
+ self.VotingLedger.register_message_handler(
+ quorum_ballot.QuorumBallotMessage,
+ self.OldBallotMessageHandler)
+ self.VotingLedger.register_message_handler(
+ quorum_ballot.QuorumCompleteVoteMessage,
+ self.OldCompleteMessageHandler)
+
+ self.VotingLedger.complete_vote(self.BlockNumber, txnlist)
diff --git a/journal/consensus/quorum/quorum_journal.py b/journal/consensus/quorum/quorum_journal.py
new file mode 100644
index 0000000000..0243c50662
--- /dev/null
+++ b/journal/consensus/quorum/quorum_journal.py
@@ -0,0 +1,308 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import math
+import random
+import time
+
+from journal.consensus.quorum import quorum_transaction_block
+from journal import journal_core
+from journal.consensus.quorum.messages import quorum_advertisement
+from journal.consensus.quorum.messages import quorum_debug
+from journal.consensus.quorum.messages import quorum_ballot
+from journal.consensus.quorum.protocols import quorum_vote
+
+logger = logging.getLogger(__name__)
+
+
+class QuorumJournal(journal_core.Journal):
+ """Implements a journal based on participant voting.
+
+ Attributes:
+ VoteTimeInterval (float): The minimum time between votes, in
+ seconds.
+ VoteTimeFudgeFactor (float): The average fudge factor added to
+ the vote interval, in seconds.
+ BallotTimeInterval (float): The minimum time between ballots on
+ a vote, in seconds.
+ BallotTimeFudgeFactor (float): The average fudge factor added
+ to the ballot interval, in seconds.
+        VoteThreshholds (list): The minimum votes required for a
+ transaction to proceed to the next ballot.
+ VotingQuorumTargetSize (int): The target size for the local
+ quorum set (note: this should be a function of network size).
+ VotingQuorum (dict): The nodes in the quorum.
+ CurrentQuorumVote (QuorumVote): The vote in progress.
+ NextVoteTime (float): When the next vote will occur.
+ NextBallotTime (float): When the next ballot will occur.
+        onHeartbeatTimer (EventHandler): The EventHandler tracking calls
+ to make when the heartbeat timer fires.
+ """
+ # minimum time between votes
+ VoteTimeInterval = 30.0
+
+ # average fudge factor added to the vote interval
+ VoteTimeFudgeFactor = 1.0
+
+ # minimum time between ballots on a vote
+ BallotTimeInterval = 5.0
+
+ # average fudge factor added to the time interval
+ BallotTimeFudgeFactor = 0.1
+
+ # minimum votes required for a transaction to proceed to the next ballot
+ VoteThreshholds = [0.0, 0.5, 0.7, 0.9]
+
+ # target size for local quorum set, note this should be a function of
+ # network size
+ VotingQuorumTargetSize = 13
+
+ def __init__(self, node, **kwargs):
+ """Constructor for the QuorumJournal class.
+
+ Args:
+ node (Node): The local node.
+ """
+ super(QuorumJournal, self).__init__(node, **kwargs)
+
+ self.VotingQuorum = dict()
+ # we are always a member of our own quorum
+ self.VotingQuorum[self.LocalNode.Identifier] = self.LocalNode
+
+ self.CurrentQuorumVote = None
+ self.NextVoteTime = self._nextvotetime()
+ self.NextBallotTime = 0
+
+ self.onHeartbeatTimer += self._triggervote
+
+ quorum_advertisement.register_message_handlers(self)
+ quorum_debug.register_message_handlers(self)
+ quorum_ballot.register_message_handlers(self)
+
+ #
+ # GENERAL JOURNAL API
+ #
+
+ def post_initialize(self):
+ """Sends a quorum advertisement to the network.
+ """
+ quorum_advertisement.send_quorum_advertisement_message(self)
+
+ def build_transaction_block(self, force=False):
+ """Builds the next transaction block for the journal.
+
+ Note:
+ For the voting journal this operation is meaningful only
+ for the initial block. All other blocks are created after
+ voting completes.
+
+ Args:
+ force (boolean): Force creation of the initial block.
+
+ Returns:
+ QuorumTransactionBlock: The created transaction block.
+ """
+
+ logger.debug('build transaction block')
+
+ if force:
+ block = quorum_transaction_block.QuorumTransactionBlock()
+ block.BlockNumber = 0
+ return block
+
+ return None
+
+ def handle_fork(self, tblock):
+ """Handle the case where we are attempting to commit a block
+ that is not connected to the current block chain. This is a
+ no-op for the QuorumJournal.
+
+ Args:
+ tblock (QuorumTransactionBlock): The block to commit.
+ """
+ logger.info(
+ 'received a forked block %s from %s with previous id %s, '
+ 'expecting %s',
+ tblock.Identifier[:8], self._id2name(tblock.OriginatorID),
+ tblock.PreviousBlockID[:8], self.MostRecentCommitedBlockID[:8])
+
+ #
+ # CUSTOM JOURNAL API
+ #
+
+ def add_quorum_node(self, node):
+ """Adds a node to this node's quorum set.
+
+ Args:
+ node (Node): The node to add to the quorum set.
+ """
+ logger.info('attempt to add quorum voting node %s to %s', str(node),
+ str(self.LocalNode))
+
+ if node.Identifier in self.VotingQuorum:
+ logger.info('attempt to add duplicate node to quorum')
+ return
+
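+        # inverse-CDF sample from an exponential distribution with mean
+        # VotingQuorumTargetSize (-mean * ln(1 - U)); admitting the node
+        # only while the current quorum size is at or below the draw keeps
+        # the expected quorum size near the target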
+ target = int(-1.0 * self.VotingQuorumTargetSize *
+ math.log(1.0 - random.random()))
+ if len(self.VotingQuorum) - 1 > target:
+ return
+
+ logger.info('add node %s to voting quorum', node.Identifier[:8])
+ self.VotingQuorum[node.Identifier] = node
+
+ def initiate_vote(self):
+ """Initiates a new vote.
+
+ This method is called when the vote timer expires indicating
+ that a new vote should be initiated.
+ """
+ logger.info('quorum, initiate, %s', self.MostRecentCommitedBlockID[:8])
+
+ # Get the list of prepared transactions, if there aren't enough then
+ # reset the timers and return since there is nothing to vote on
+ txnlist = self._preparetransactionlist()
+ if len(txnlist) < self.MinimumTransactionsPerBlock:
+ logger.debug('insufficient transactions for vote; %d out of %d',
+ len(txnlist), self.MinimumTransactionsPerBlock)
+
+ self.NextVoteTime = self._nextvotetime()
+ self.NextBallotTime = 0
+ return
+
+ # we are initiating the vote, send the message to the world
+ newblocknum = self.MostRecentCommitedBlock.BlockNumber + 1
+        msg = quorum_ballot.QuorumInitiateVoteMessage()
+ msg.BlockNumber = newblocknum
+ self.forward_message(msg)
+
+ # and get our own process rolling
+ self.handle_vote_initiation(newblocknum)
+
+ def handle_vote_initiation(self, blocknum):
+ """Handles an incoming VoteInitiation message.
+
+ Args:
+ blocknum (int): The block number for the proposed vote.
+
+ Returns:
+ bool: True if this is a new, valid vote, false otherwise.
+ """
+
+ if blocknum != self.MostRecentCommitedBlock.BlockNumber + 1:
+ logger.warn(
+ 'attempt initiate vote on block %d, expecting block %d',
+ blocknum, self.MostRecentCommitedBlock.BlockNumber + 1)
+ return False
+
+ if self.CurrentQuorumVote:
+ logger.debug(
+ 'received request to start a vote already in progress')
+ return False
+
+ logger.info('quorum, handle initiate, %s',
+ self.MostRecentCommitedBlockID[:8])
+
+ txnlist = self._preparetransactionlist()
+ self.CurrentQuorumVote = quorum_vote.QuorumVote(self, blocknum,
+ txnlist)
+ self.NextVoteTime = 0
+ self.NextBallotTime = self._nextballottime()
+
+ return True
+
+ def close_current_ballot(self):
+ """Closes the current ballot.
+
+ This method is called when the timer indicates that the vote for a
+ particular ballot is complete.
+ """
+ logger.info('quorum, ballot, %s, %d',
+ self.MostRecentCommitedBlockID[:8],
+ self.CurrentQuorumVote.Ballot)
+
+ self.NextBallotTime = self._nextballottime()
+ self.CurrentQuorumVote.close_current_ballot()
+
+ def complete_vote(self, blocknum, txnlist):
+ """Close the current vote.
+
+ This is called by the QuorumVote object after the last ballot has been
+ closed. The specified transactions can be safely added to journal.
+
+ Args:
+ blocknum (int): The block identifier.
+ txnlist (list): A list of transactions that nodes voted to
+ include in the block.
+ """
+
+ logger.debug('complete the vote for block based on %s',
+ self.MostRecentCommitedBlockID)
+
+ if blocknum != self.MostRecentCommitedBlock.BlockNumber + 1:
+ logger.warn(
+ 'attempt complete vote on block %d, expecting block %d',
+ blocknum, self.MostRecentCommitedBlock.BlockNumber + 1)
+ return
+
+ nblock = quorum_transaction_block.QuorumTransactionBlock()
+ nblock.BlockNumber = blocknum
+ nblock.PreviousBlockID = self.MostRecentCommitedBlockID
+ nblock.TransactionIDs = txnlist[:]
+ nblock.sign_from_node(self.LocalNode)
+
+ logger.info('commit: %s', nblock.dump())
+
+ self.commit_transaction_block(nblock)
+
+ self.CurrentQuorumVote = None
+ self.NextVoteTime = self._nextvotetime()
+ self.NextBallotTime = 0
+
+ #
+ # UTILITY FUNCTIONS
+ #
+
+ def _triggervote(self, now):
+ """
+ Handle timer events
+ """
+
+ if self.NextVoteTime != 0:
+ if self.NextVoteTime < now:
+ self.initiate_vote()
+
+ elif self.NextBallotTime != 0:
+ if self.NextBallotTime < now:
+ self.close_current_ballot()
+
+ def _nextvotetime(self, now=0):
+ """
+ Generate the time for the next vote to be initiated
+ """
+ if now == 0:
+ now = time.time()
+ return now + self.VoteTimeInterval + random.expovariate(
+ 1.0 / self.VoteTimeFudgeFactor)
+
+ def _nextballottime(self, now=0):
+ """
+ Generate the time for the next ballot to be initiated
+ """
+ if now == 0:
+ now = time.time()
+ return now + self.BallotTimeInterval + random.expovariate(
+ 1.0 / self.BallotTimeFudgeFactor)
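+
+# random.expovariate(1.0 / mean) draws from an exponential distribution with
+# the given mean, so the vote and ballot times above carry jitter of (on
+# average) the fudge factors; presumably this keeps validators from firing
+# their timers in lockstep.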
diff --git a/journal/consensus/quorum/quorum_transaction_block.py b/journal/consensus/quorum/quorum_transaction_block.py
new file mode 100644
index 0000000000..0a3f5dac8d
--- /dev/null
+++ b/journal/consensus/quorum/quorum_transaction_block.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from journal import transaction_block
+from journal.messages import transaction_block_message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Registers quorum transaction block message handlers with
+ the journal.
+
+ Args:
+ journal (QuorumJournal): The journal on which to register the
+ message handlers.
+ """
+ journal.register_message_handler(
+ QuorumTransactionBlockMessage,
+ transaction_block_message.transaction_block_message_handler)
+
+
+class QuorumTransactionBlockMessage(
+ transaction_block_message.TransactionBlockMessage):
+ """Quorum transaction block messages represent the message format
+ for exchanging information about quorum transaction blocks.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ """
+ MessageType = "/" + __name__ + "/TransactionBlock"
+
+ def __init__(self, minfo={}):
+ """Constructor for QuorumTransactionBlockMessage.
+
+ Args:
+ minfo (dict): A dict of initial values for the new
+ QuorumTransactionBlockMessage.
+ """
+ super(QuorumTransactionBlockMessage, self).__init__(minfo)
+
+ tinfo = minfo.get('TransactionBlock', {})
+ self.TransactionBlock = QuorumTransactionBlock(tinfo)
+
+
+class QuorumTransactionBlock(transaction_block.TransactionBlock):
+ """A quorum transaction block is a set of quorum transactions to
+ be applied to a ledger.
+
+ Attributes:
+ TransactionBlockTypeName (str): The name of the quorum block
+ type.
+ MessageType (type): The quorum transaction block message
+ class.
+ BlockNumber (int): The number of the block.
+ """
+ TransactionBlockTypeName = '/Quorum'
+ MessageType = QuorumTransactionBlockMessage
+
+ def __init__(self, minfo={}):
+ """Constructor for the QuorumTransactionBlock class.
+
+ Args:
+            minfo (dict): A dict containing initial values for
+ the new QuorumTransactionBlock.
+ """
+ super(QuorumTransactionBlock, self).__init__(minfo)
+ self.BlockNumber = minfo.get('BlockNumber', 0)
+
+ def __str__(self):
+ return "{0}, {1}, {2:0.2f}, {3}".format(
+ self.Identifier[:8], len(self.TransactionIDs), self.CommitTime,
+ self.BlockNumber)
+
+ def dump(self):
+ """Returns a dict with information about the quorum transaction
+ block.
+
+ Returns:
+ dict: A dict containing info about the quorum transaction
+ block.
+ """
+ result = super(QuorumTransactionBlock, self).dump()
+ result['BlockNumber'] = self.BlockNumber
+
+ return result
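+
+# Illustrative sketch (hypothetical values; assumes the base class tolerates
+# an otherwise empty minfo): the minfo dict round-trips through dump(), so a
+# block can be rebuilt from its own serialized form.
+#
+#     block = QuorumTransactionBlock({'BlockNumber': 12})
+#     info = block.dump()
+#     assert info['BlockNumber'] == 12
+#     clone = QuorumTransactionBlock(info)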
diff --git a/journal/global_store_manager.py b/journal/global_store_manager.py
new file mode 100644
index 0000000000..463afdd974
--- /dev/null
+++ b/journal/global_store_manager.py
@@ -0,0 +1,535 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import anydbm
+import copy
+import logging
+
+from gossip.common import cbor2dict, dict2cbor, NullIdentifier
+
+logger = logging.getLogger(__name__)
+
+
+class ReadOnlyException(BaseException):
+ """An exception thrown when an update is attempted on a read-only store.
+ """
+ pass
+
+
+class GlobalStoreManager(object):
+ """The GlobalStoreManager class encapsulates persistent management
+ of state associated with blocks in the ledger.
+
+    To use the class, first create a BlockStore that is initialized with
+    an empty store (which should be a subclass of the KeyValueStore class)
+    for each transaction family in the ledger, and register it with
+    add_transaction_store. The root block is committed automatically when
+    the manager is constructed, whether or not this is the first time the
+    validator is run.
+
+ Attributes:
+ RootBlockID (str): The ID of the root block.
+ """
+
+ RootBlockID = NullIdentifier
+
+ def __init__(self, blockstorefile='blockstore', dbmode='c'):
+ """Initialize a GlobalStoreManager, opening the database file.
+
+ Args:
+ blockstorefile (str): The name of the file to use for
+ persistent data.
+ dbmode (str): The mode used to open the file (see anydbm
+ parameters).
+ """
+ logger.info('create blockstore from file %s with flag %s',
+ blockstorefile, dbmode)
+
+ self._blockmap = {}
+ self._persistmap = anydbm.open(blockstorefile, dbmode)
+
+ rootstore = BlockStore()
+ rootstore.commit_block(self.RootBlockID)
+ self._blockmap[self.RootBlockID] = rootstore
+ self._persistmap[self.RootBlockID] = dict2cbor(rootstore.dump_block())
+ self._persistmap.sync()
+
+        logger.debug('the persistent block store has %s keys',
+ len(self._persistmap))
+
+ def close(self):
+ """Close the database file.
+ """
+ self._persistmap.close()
+
+ def add_transaction_store(self, tname, tstore):
+ """Registers a data store type with a particular transaction type.
+
+ Args:
+ tname (str): The name of the transaction type.
+ tstore (KeyValueStore): The data store to associate with the
+ transaction type.
+ """
+
+ # we should really only be adding transaction stores to the
+ # root block, if this fails we need to think more about the
+ # initialization
+ assert len(self._blockmap) == 1
+
+ rootstore = self._blockmap[self.RootBlockID]
+ rootstore.add_transaction_store(tname, tstore)
+
+ rootstore.commit_block(self.RootBlockID)
+ self._blockmap[self.RootBlockID] = rootstore
+ self._persistmap[self.RootBlockID] = dict2cbor(rootstore.dump_block())
+ self._persistmap.sync()
+
+ def commit_block_store(self, blockid, blockstore):
+ """Associates the blockstore with the blockid and commits
+ the blockstore to disk.
+
+ Marks the blockstore read only as part of the process.
+
+ Args:
+ blockid (str): The identifier to associate with the block.
+ blockstore (BlockStore): An initialized blockstore to be
+ used as the root store.
+ """
+
+ # if we commit a block then we know that either this is the genesis
+ # block or that the previous block is committed already
+ assert blockstore.PreviousBlockID in self._persistmap
+
+ blockstore.commit_block(blockid)
+ self._blockmap[blockid] = blockstore
+ self._persistmap[blockid] = dict2cbor(blockstore.dump_block())
+ self._persistmap.sync()
+
+ def get_block_store(self, blockid):
+ """Gets the blockstore associated with a particular blockid.
+
+ This method will ensure that all previous block stores are
+ loaded as well.
+
+ Args:
+ blockid (str): Identifier associated with the block.
+
+ Returns:
+ BlockStore: The blockstore associated with the identifier.
+ """
+ if blockid not in self._blockmap:
+ logger.debug(
+ 'load storage for block %s from persistent block store',
+ blockid)
+
+ if blockid not in self._persistmap:
+ raise KeyError('unknown block', blockid)
+
+ blockinfo = cbor2dict(self._persistmap[blockid])
+ prevstore = self.get_block_store(blockinfo['PreviousBlockID'])
+ blockstore = prevstore.clone_block(blockinfo)
+ blockstore.commit_block(blockid)
+ self._blockmap[blockid] = blockstore
+
+ return self._blockmap[blockid]
+
+ def flush_block_store(self, blockid):
+ """Removes the memory copy of this block and all predecessors.
+
+ Note:
+            Generally this is done through the flatten_block_store method.
+
+ Args:
+ blockid (str): Identifier associated with the block.
+ """
+
+ if blockid == self.RootBlockID:
+ return
+
+ blockstore = self._blockmap.get(blockid)
+ if blockstore is None:
+ return
+
+ self.flush_block_store(blockstore.PreviousBlockID)
+ del self._blockmap[blockid]
+
+ def flatten_block_store(self, blockid):
+ """Collapses the history of this blockstore into a single blockstore.
+
+ Flattening creates duplicate copies of the objects so it is
+ important to release the history of the blockstore from memory.
+ It is best if this is called only for relatively old blocks
+ that are unlikely to be rolled back.
+
+ Args:
+ blockid (str): Identifier associated with the block.
+ """
+ blockstore = self._blockmap[blockid]
+
+ blockstore.flatten()
+ self._persistmap[blockid] = dict2cbor(blockstore.dump_block())
+ self._persistmap.sync()
+
+ self.flush_block_store(blockstore.PreviousBlockID)
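+
+    # Illustrative usage sketch (identifiers are hypothetical): transaction
+    # stores are registered against the root block, and each committed block
+    # then gets its own checkpointed copy of every store.
+    #
+    #     gsm = GlobalStoreManager('blockstore', 'c')
+    #     gsm.add_transaction_store('/MyFamily', KeyValueStore())
+    #     root = gsm.get_block_store(GlobalStoreManager.RootBlockID)
+    #     store = root.clone_block()
+    #     gsm.commit_block_store('blockid1', store)
+    #     assert gsm.get_block_store('blockid1') is store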
+
+
+class BlockStore(object):
+ """The BlockManager class captures the ledger state associated with
+ a single block.
+
+ The ledger consists of a copy of the data store associated with
+ each transaction family as it exists after all transactions in the
+ block have been applied to the state from the preceding block.
+
+ With the exception of the root block, all others should be created
+ through the clone_block method which will preserve the ordering of
+ the stores.
+
+ Attributes:
+ PrevBlock (BlockStore): The previous block.
+ BlockID (str): The ID of the root block.
+ TransactionStores (dict): The transaction stores associated with
+ this block store.
+ """
+
+ def __init__(self, prevblock=None, blockinfo=None):
+ """Initializes a new BlockStore.
+
+ Args:
+ prevblock (BlockStore): Optional parameter to initialize
+ previous block pointer, required for all but the root block.
+ blockinfo (dict): Optional initial data for the block.
+ """
+
+ self.PrevBlock = prevblock
+
+ self.BlockID = GlobalStoreManager.RootBlockID
+ self.TransactionStores = {}
+
+ if self.PrevBlock:
+ for tname, tstore in self.PrevBlock.TransactionStores.iteritems():
+ storeinfo = blockinfo['TransactionStores'][
+ tname] if blockinfo else None
+ self.add_transaction_store(
+ tname, tstore.clone_store(storeinfo))
+
+ @property
+ def PreviousBlockID(self):
+ """
+        Return the identifier associated with the previous block, or
+        NullIdentifier if this is the root block (i.e., there is no
+        previous block)
+ """
+ return self.PrevBlock.BlockID if self.PrevBlock else NullIdentifier
+
+ def add_transaction_store(self, tname, tstore):
+ """Register a data store type with a particular transaction type.
+
+ Args:
+ tname (str): The name of the transaction type
+ tstore (KeyValueStore): The data store to associate with the
+ transaction type
+ """
+ self.TransactionStores[tname] = tstore
+
+ def get_transaction_store(self, tname):
+ """Return the transaction store associated with a particular
+ transaction family.
+
+ Args:
+ tname (str): The name of the transaction family
+
+ Returns:
+ KeyValueStore: The store associated with the family.
+ """
+ return self.TransactionStores[tname]
+
+ def clone_block(self, blockinfo=None):
+ """Create a copy of the ledger by creating and registering a copy
+ of each store in the current ledger.
+
+ Args:
+            blockinfo (dict): Optional output of the dump_block() method.
+ """
+ return BlockStore(self, blockinfo)
+
+ def commit_block(self, blockid):
+ """Persist the state of the store to disk.
+
+ This should be called when the block is committed so we know
+ that the state will not change.
+
+ Args:
+ blockid (str): The identifier associated with the block.
+ """
+ self.BlockID = blockid
+ for tstore in self.TransactionStores.itervalues():
+ tstore.commit()
+
+ def flatten(self):
+ """Flatten the store at this point.
+ """
+ for tstore in self.TransactionStores.itervalues():
+ tstore.flatten()
+
+ def dump_block(self):
+ """Serialize the stores associated with this block.
+
+ Returns:
+ dict: Information about the stores associated with this
+ block.
+ """
+ result = dict()
+ result['BlockID'] = self.BlockID
+ result['PreviousBlockID'] = self.PreviousBlockID
+ result['TransactionStores'] = {}
+ for tname, tstore in self.TransactionStores.iteritems():
+ result['TransactionStores'][tname] = tstore.dump()
+
+ return result
+
+
+class KeyValueStore(object):
+ """
+ The KeyValueStore class implements a journaling dictionary that
+ enables rollback through generational updates.
+
+ For optimization the chain of stores can be flattened to limit
+ traversal of the chain.
+
+ Attributes:
+ ReadOnly (bool): Whether or not the store is read only.
+ PrevStore (KeyValueStore): The previous checkpoint of the store.
+ """
+
+ def __init__(self, prevstore=None, storeinfo=None):
+ """Initialize a new KeyValueStore object.
+
+ Args:
+ prevstore (KeyValueStore): A reference to the previous
+ checkpoint of the store.
+ storeinfo (dict): Optional output of the dump() method,
+ forces committed state.
+ """
+
+ self.ReadOnly = False
+ self.PrevStore = prevstore
+
+ if storeinfo:
+ self._store = copy.deepcopy(storeinfo['Store'])
+ self._deletedkeys = set(storeinfo['DeletedKeys'])
+ else:
+ self._store = dict()
+ self._deletedkeys = set()
+
+ def clone_store(self, storeinfo=None):
+ """Creates a new checkpoint that can be modified.
+
+ Args:
+ storeinfo (dict): Information about the store to clone.
+
+ Returns:
+ KeyValueStore: A new checkpoint that extends the current
+ store.
+ """
+ return KeyValueStore(self, storeinfo)
+
+ def commit(self):
+ """Marks the store as read only.
+
+ Do not allow any further modifications to this store or
+ through this store to previous checkpoints.
+ """
+ self.ReadOnly = True
+
+ def compose(self, readonly=True):
+ """Creates a dictionary that is the composition of all
+ previous stores.
+
+ The dictionary that is created contains a copy of the
+ dictionary entries.
+
+ Args:
+ readonly (bool): Whether or not the copy will be read only,
+ in which case a deep copy is not performed.
+
+ Returns:
+ dict: A dictionary with a copy of all items in the store.
+ """
+ copyfn = copy.copy if readonly else copy.deepcopy
+
+ result = self.PrevStore.compose() if self.PrevStore else dict()
+
+ # copy our dictionary into the result
+ result.update(copyfn(self._store))
+
+ # remove the deleted keys from the store
+ for k in self._deletedkeys:
+ result.pop(k, None)
+
+ # it would be possible to flatten the store here since
+ # we've already done all the work; however, that would still
+ # use too much memory. we only want to flatten if we are
+ # flushing previous state
+
+ return result
+
+ def flatten(self):
+ """Truncates the journal history at this point.
+
+ Collapse all previous stores into this one and remove any
+ reverse references.
+ """
+ if self.ReadOnly:
+ self._store = self.compose(readonly=True)
+ self._deletedkeys = set()
+ self.PrevStore = None
+
+ def get(self, key):
+ """Gets the value associated with a key, cascading the
+ request through the chain of stores.
+
+ Args:
+ key (str): The key to lookup.
+
+ Returns:
+ object: The value associated with the key.
+ """
+ if key in self._store:
+ return copy.deepcopy(self._store[key])
+
+ if self.PrevStore and key not in self._deletedkeys:
+ return self.PrevStore.get(key)
+
+ raise KeyError('attempt to access missing key', key)
+
+ def __getitem__(self, key):
+ return self.get(key)
+
+ def set(self, key, value):
+ """Sets the value associated with a key.
+
+ Note:
+ This change only occurs in the current checkpoint.
+
+ Args:
+ key (str): The key to set.
+ value (str): The value to bind to the key. A deepcopy is
+ made.
+ """
+ if self.ReadOnly:
+ raise ReadOnlyException("Attempt to modify readonly store")
+
+ self._store[key] = copy.deepcopy(value)
+ self._deletedkeys.discard(key)
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def delete(self, key):
+ """Removes the key from the current store if it exists and
+ adds it to the deleted keys list if it exists in previous
+ checkpoints.
+
+ Args:
+ key (str): The key to delete.
+ """
+
+ if self.ReadOnly:
+ raise ReadOnlyException("Attempt to modify readonly store")
+
+ self._store.pop(key, None)
+ self._deletedkeys.add(key)
+
+ def __delitem__(self, key):
+ self.delete(key)
+
+ def has_key(self, key):
+ """Walks the chain to determine if the key exists in the store.
+
+ Args:
+ key (str): The key to search for.
+
+ Returns:
+ bool: Whether or not the key exists in the store.
+ """
+ if key in self._store:
+ return True
+
+ if self.PrevStore and key not in self._deletedkeys:
+ return key in self.PrevStore
+
+ return False
+
+ def _keys(self):
+ """Computes the set of valid keys used in the store.
+
+ Returns:
+ set: The set of valid keys in the store.
+ """
+ kset = self.PrevStore._keys() if self.PrevStore else set()
+ kset -= self._deletedkeys
+ kset |= set(self._store.keys())
+
+ return kset
+
+ def keys(self):
+ """Computes the set of valid keys used in the store.
+
+ Returns:
+ list: A list of valid keys in the store.
+ """
+ return list(self._keys())
+
+ def __iter__(self):
+ """Create an iterator for the keys.
+ """
+ for k in self.keys():
+ yield k
+
+ def iteritems(self):
+ """Creates an iterator for items in the store.
+ """
+ for k in self.keys():
+ yield k, self.get(k)
+
+ def __contains__(self, key):
+ """Determines whether a key occurs in the store.
+
+ Args:
+ key (str): A key to test.
+
+ Returns:
+ bool: Whether the key exists in the store.
+ """
+
+ if key in self._store:
+ return True
+
+ if key in self._deletedkeys:
+ return False
+
+ return self.PrevStore and self.PrevStore.__contains__(key)
+
+ def dump(self):
+ """Returns a dict containing information about the store.
+
+ Returns:
+ dict: A dict containing information about the store.
+ """
+ result = dict()
+ result['Store'] = copy.deepcopy(self._store)
+ result['DeletedKeys'] = list(self._deletedkeys)
+
+ return result
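+
+# Illustrative sketch of the journaling behavior (names are hypothetical):
+# each clone is a new checkpoint layered over a committed parent, so reads
+# cascade down the chain while writes stay local to the newest checkpoint.
+#
+#     base = KeyValueStore()
+#     base.set('balance', 100)
+#     base.commit()                    # base is now read only
+#     tip = base.clone_store()
+#     tip.set('balance', 90)
+#     assert base.get('balance') == 100 and tip.get('balance') == 90
+#     del tip['balance']               # masks the key in this checkpoint
+#     assert 'balance' not in tip and 'balance' in base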
diff --git a/journal/journal_core.py b/journal/journal_core.py
new file mode 100644
index 0000000000..08f6d05c0b
--- /dev/null
+++ b/journal/journal_core.py
@@ -0,0 +1,1043 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import shelve
+import time
+from collections import OrderedDict
+
+from gossip import common, event_handler, gossip_core, stats
+from journal import transaction, transaction_block
+from journal.global_store_manager import GlobalStoreManager
+from journal.messages import transaction_message
+from journal.messages import transaction_block_message
+from journal.messages import journal_transfer
+from journal.messages import journal_debug
+
+logger = logging.getLogger(__name__)
+
+
+class Journal(gossip_core.Gossip):
+ """The base journal class.
+
+ Attributes:
+ MaximumBlocksToKeep (int): Maximum number of blocks to keep in cache.
+ MinimumTransactionsPerBlock (int): Minimum number of transactions
+ per block.
+ MaximumTransactionsPerBlock (int): Maximum number of transactions
+ per block.
+ MissingRequestInterval (float): Time in seconds between sending
+ requests for a missing transaction block.
+ StartTime (float): The initialization time of the journal in
+ seconds since the epoch.
+ Initializing (bool): Whether or not the journal is in an
+ initializing state.
+ InitialTransactions (list): A list of initial transactions to
+ process.
+ InitialBlockList (list): A list of initial blocks to process.
+ GenesisLedger (bool): Whether or not this journal is associated
+ with a genesis node.
+ Restore (bool): Whether or not to restore block data.
+ onGenesisBlock (EventHandler): An EventHandler for functions
+ to call when processing a genesis block.
+ onBuildBlock (EventHandler): An EventHandler for functions
+ to call when processing a build block.
+ onClaimBlock (EventHandler): An EventHandler for functions
+ to call when processing a claim block.
+ onCommitBlock (EventHandler): An EventHandler for functions
+ to call when processing a commit block.
+ onDecommitBlock (EventHandler): An EventHandler for functions
+ to call when processing a decommit block.
+ onBlockTest (EventHandler): An EventHandler for functions
+ to call when processing a block test.
+ PendingTransactions (dict): A dict of pending, unprocessed
+ transactions.
+ TransactionStore (Shelf): A dict-like object representing
+ the persisted copy of the transaction store.
+ BlockStore (Shelf): A dict-like object representing the
+ persisted copy of the block store.
+ ChainStore (Shelf): A dict-like object representing the
+ persisted copy of the chain store.
+ RequestedTransactions (dict): A dict of transactions which are
+ not in the local cache, the details of which have been
+ requested from peers.
+ RequestedBlocks (dict): A dict of blocks which are not in the
+ local cache, the details of which have been requested
+ from peers.
+ MostRecentCommitedBlockID (str): The block ID of the most
+ recently committed block.
+ PendingTransactionBlock (TransactionBlock): The constructed
+ pending transaction block.
+ PendingBlockIDs (set): A set of pending block identifiers.
+ InvalidBlockIDs (set): A set of invalid block identifiers.
+ FrontierBlockIDs (set): A set of block identifiers for blocks
+ which still need to be processed.
+ GlobalStoreMap (GlobalStoreManager): Manages access to the
+ various persistence stores.
+ """
+
+    # For storage management, maximum number of blocks to keep cached
+ MaximumBlocksToKeep = 10
+
+ # Minimum number of transactions per block
+ MinimumTransactionsPerBlock = 10
+
+ # Maximum number of transactions per block
+ MaximumTransactionsPerBlock = 200
+
+ # Time between sending requests for a missing transaction block
+ MissingRequestInterval = 30.0
+
+ def __init__(self, node, **kwargs):
+ """Constructor for the Journal class.
+
+ Args:
+ node (Node): The local node.
+ GenesisLedger (bool): Whether or not this journal is associated
+ with a genesis node.
+ Restore (bool): Whether or not to restore block data.
+            DataDirectory (str): The directory in which to place the
+                database files.
+ """
+ super(Journal, self).__init__(node, **kwargs)
+
+ self.StartTime = time.time()
+ self.Initializing = True
+
+ self.InitialTransactions = []
+ self.InitialBlockList = []
+
+ self.GenesisLedger = kwargs.get('GenesisLedger', False)
+ self.Restore = kwargs.get('Restore', False)
+
+ # set up the event handlers that the transaction families can use
+ self.onGenesisBlock = event_handler.EventHandler('onGenesisBlock')
+ self.onBuildBlock = event_handler.EventHandler('onBuildBlock')
+ self.onClaimBlock = event_handler.EventHandler('onClaimBlock')
+ self.onCommitBlock = event_handler.EventHandler('onCommitBlock')
+ self.onDecommitBlock = event_handler.EventHandler('onDecommitBlock')
+ self.onBlockTest = event_handler.EventHandler('onBlockTest')
+
+ # this flag indicates whether we should create a completely new
+ # database file or reuse an existing file
+ shelveflag = 'c' if self.Restore else 'n'
+ shelvedir = kwargs.get('DataDirectory', 'db')
+
+ self.PendingTransactions = OrderedDict()
+
+ dbprefix = shelvedir + "/" + str(self.LocalNode)
+ self.TransactionStore = shelve.open(dbprefix + "_txn", shelveflag)
+ self.BlockStore = shelve.open(dbprefix + "_cb", shelveflag)
+ self.ChainStore = shelve.open(dbprefix + "_cs", shelveflag)
+
+ self.RequestedTransactions = {}
+ self.RequestedBlocks = {}
+
+ self.MostRecentCommitedBlockID = common.NullIdentifier
+ self.PendingTransactionBlock = None
+
+ self.PendingBlockIDs = set()
+ self.InvalidBlockIDs = set()
+ self.FrontierBlockIDs = set()
+
+ # Set up the global store and transaction handlers
+ self.GlobalStoreMap = GlobalStoreManager(
+ shelvedir + "/" + str(self.LocalNode) + "_gs", shelveflag)
+
+ # initialize the ledger stats data structures
+ self._initledgerstats()
+
+ # connect the message handlers
+ transaction_message.register_message_handlers(self)
+ transaction_block_message.register_message_handlers(self)
+ journal_transfer.register_message_handlers(self)
+ journal_debug.register_message_handlers(self)
+
+ @property
+ def CommitedBlockCount(self):
+ """Returns the block number of the most recently committed block.
+
+ Returns:
+ int: most recently committed block number.
+ """
+ return self.MostRecentCommitedBlock.BlockNum
+
+ @property
+ def CommitedTxnCount(self):
+ """Returns the committed transaction count.
+
+ Returns:
+ int: the transaction depth based on the most recently
+ committed block.
+ """
+ return self.MostRecentCommitedBlock.TransactionDepth
+
+ @property
+ def PendingBlockCount(self):
+ """Returns the number of pending blocks.
+
+ Returns:
+ int: the number of pending blocks.
+ """
+ return len(self.PendingBlockIDs)
+
+ @property
+ def PendingTxnCount(self):
+ """Returns the number of pending transactions.
+
+ Returns:
+ int: the number of pending transactions.
+ """
+ return len(self.PendingTransactions)
+
+ def shutdown(self):
+ """Shuts down the journal in an orderly fashion.
+ """
+ logger.info('close journal databases in preparation for shutdown')
+
+ # Global store manager handles its own database
+ self.GlobalStoreMap.close()
+
+ self.TransactionStore.close()
+ self.BlockStore.close()
+ self.ChainStore.close()
+
+ super(Journal, self).shutdown()
+
+ def add_transaction_store(self, family):
+ """Add a transaction type-specific store to the global store.
+
+ Args:
+ family (Transaction): The transaction family.
+ """
+ tname = family.TransactionTypeName
+ tstore = family.TransactionStoreType()
+ self.GlobalStoreMap.add_transaction_store(tname, tstore)
+
+ @property
+ def GlobalStore(self):
+ """Returns a reference to the global store associated with the
+        most recently committed block that this validator possesses.
+
+        Returns:
+            BlockStore: The global store associated with that block.
+ """
+ blkid = self.MostRecentCommitedBlockID
+
+ return self.GlobalStoreMap.get_block_store(blkid)
+
+ @property
+ def MostRecentCommitedBlock(self):
+ """Returns the most recently committed block.
+
+ Returns:
+            TransactionBlock: the most recently committed block.
+ """
+ return self.BlockStore.get(self.MostRecentCommitedBlockID)
+
+ def commited_block_ids(self, count=0):
+ """Returns the list of block identifiers starting from the
+        most recently committed block.
+
+ Args:
+ count (int): How many results should be returned.
+
+ Returns:
+ list: A list of committed block ids.
+ """
+ if count == 0:
+ count = len(self.BlockStore)
+
+ blockids = []
+
+ blkid = self.MostRecentCommitedBlockID
+ while blkid != common.NullIdentifier and len(blockids) < count:
+ blockids.append(blkid)
+ blkid = self.BlockStore[blkid].PreviousBlockID
+
+ return blockids
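+
+    # Illustrative sketch (hypothetical state): the returned list is ordered
+    # from the chain head backwards, so
+    #
+    #     blkids = journal.commited_block_ids(3)
+    #     blkids[0] == journal.MostRecentCommitedBlockID
+    #
+    # holds whenever at least one block has been committed.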
+
+ def initialization_complete(self):
+ """Processes all invocations that arrived while the ledger was
+ being initialized.
+ """
+ logger.info('process initial transactions and blocks')
+
+ self.Initializing = False
+
+ # this is a very special case where the ledger is started with an
+ # existing database but no validation network. the situation can (and
+ # has) occurred with deployments where all validators fail. so this is
+        # really the solution of last resort; it assumes that all databases
+        # are
+ # successfully restored
+ if self.GenesisLedger and self.Restore:
+ logger.warn('restore ledger state from the backup data stores')
+ self.MostRecentCommitedBlockID = \
+ self.ChainStore['MostRecentBlockID']
+ self.post_initialize()
+ return
+
+ for txn in self.InitialTransactions:
+ self.add_pending_transaction(txn)
+ self.InitialTransactions = None
+
+ for block in self.InitialBlockList:
+ self.commit_transaction_block(block)
+ self.InitialBlockList = None
+
+        # Let the subclasses handle their own post-initialization
+ self.post_initialize()
+
+ # generate a block, if none exists then generate and commit the root
+ # block
+ if self.GenesisLedger:
+ logger.warn('node %s claims the genesis block',
+ self.LocalNode.Name)
+ self.onGenesisBlock.fire(self)
+ self.claim_transaction_block(self.build_transaction_block(True))
+ else:
+ if self.MostRecentCommitedBlockID == common.NullIdentifier:
+ logger.critical('no ledger for a new network node')
+ return
+
+ logger.info('finished processing initial transactions and blocks')
+
+ def post_initialize(self):
+ """Called after initialization completes and before the initial
+ block is created.
+
+ Note:
+ This is intended for subclasses to specialize more easily.
+ """
+ pass
+
+ def add_pending_transaction(self, txn):
+ """Adds a transaction to the list of candidates for commit.
+
+ Args:
+ txn (Transaction.Transaction): The newly arrived transaction
+ """
+ logger.debug('incoming transaction %s', txn.Identifier[:8])
+
+ # nothing more to do, we are initializing
+ if self.Initializing:
+ self.InitialTransactions.append(txn)
+ return
+
+ # if we already have the transaction there is nothing to do
+ if txn.Identifier in self.TransactionStore:
+ assert self.TransactionStore[txn.Identifier]
+ return
+
+ # add it to the transaction store
+ txn.Status = transaction.Status.pending
+ self.TransactionStore[txn.Identifier] = txn
+ if txn.add_to_pending():
+ self.PendingTransactions[txn.Identifier] = True
+
+ # if this is a transaction we requested, then remove it from the list
+ # and look for any blocks that might be completed as a result of
+ # processing the transaction
+ if txn.Identifier in self.RequestedTransactions:
+ logger.info('catching up on old transaction %s',
+ txn.Identifier[:8])
+ del self.RequestedTransactions[txn.Identifier]
+
+ blockids = []
+ for blockid in self.PendingBlockIDs:
+ if txn.Identifier in self.BlockStore[blockid].TransactionIDs:
+ blockids.append(blockid)
+
+ for blockid in blockids:
+ self._handleblock(self.BlockStore[blockid])
+
+        # there is a chance that we deferred creating a transaction block
+ # because there were insufficient transactions, this is where we check
+ # to see if there are now enough to run the validation algorithm
+ if not self.PendingTransactionBlock:
+ self.PendingTransactionBlock = self.build_transaction_block()
+
+ def commit_transaction_block(self, tblock):
+ """Commits a block of transactions to the chain.
+
+ Args:
+ tblock (Transaction.TransactionBlock): A block of
+ transactions which nodes agree to commit.
+ """
+ logger.debug('processing incoming transaction block %s',
+ tblock.Identifier[:8])
+
+ # Make sure this is a valid block, for now this will just check the
+ # signature... more later
+ if not tblock.verify_signature():
+            logger.warn('invalid block %s received from %s', tblock.Identifier,
+ tblock.OriginatorID)
+ return
+
+ # Don't do anything with incoming blocks if we are initializing, wait
+ # for the connections to be fully established
+ if self.Initializing:
+ logger.debug('adding block %s to the pending queue',
+ tblock.Identifier[:8])
+
+ # this is an ugly hack to ensure that we don't treat this as a
+ # duplicate while processing the initial blocks; this can happen,
+ # for example, when we are restoring from saved state. having it in
+ # the block store means we didn't need to transfer it during ledger
+            # transfer. a much better approach at a future time would be to
+            # circumvent the commit logic and just add the block to the chain;
+            # however, that does mean that we have to TRUST our peer, which is
+            # not necessarily such a good thing...
+ if tblock.Identifier in self.BlockStore:
+ del self.BlockStore[tblock.Identifier]
+
+ self.InitialBlockList.append(tblock)
+ return
+
+ # If this is a block we requested, then remove it from the list
+ if tblock.Identifier in self.RequestedBlocks:
+ del self.RequestedBlocks[tblock.Identifier]
+
+ # Make sure that we have not already processed this block
+ if tblock.Identifier in self.BlockStore:
+            logger.info('found previously committed block %s',
+ tblock.Identifier[:8])
+ return
+
+ # Make sure we initialize the state of the block
+ tblock.Status = transaction_block.Status.incomplete
+
+        # Add this block to the block pool, mark as orphaned until it is
+        # committed
+ self.PendingBlockIDs.add(tblock.Identifier)
+ self.BlockStore[tblock.Identifier] = tblock
+
+ self._handleblock(tblock)
+
+ def claim_transaction_block(self, block):
+ """Fires the onClaimBlock event handler and locally commits the
+ transaction block.
+
+ Args:
+ block (Transaction.TransactionBlock): A block of
+ transactions to claim.
+ """
+ # fire the event handler for claiming the transaction block
+ self.onClaimBlock.fire(self, block)
+ self.commit_transaction_block(block)
+
+ def request_missing_block(self, blockid, exceptions=[], request=None):
+ """Requests neighbors to send a transaction block.
+
+ This method is called when one block references another block
+ that is not currently in the local cache. Only send the request
+ periodically to avoid spamming the network with duplicate requests.
+
+ Args:
+ blockid (str): The identifier of the missing block.
+ exceptions (list): Identifiers of nodes we know don't have
+ the block.
+ request (Message): A previously initialized message for
+ sending the request; avoids duplicates.
+ """
+ now = time.time()
+
+ if (blockid in self.RequestedBlocks
+ and now < self.RequestedBlocks[blockid]):
+ return
+ self.RequestedBlocks[blockid] = now + self.MissingRequestInterval
+
+ # if the request for the missing block came from another node, then
+ # we need to reuse the request or we'll process multiple copies
+ if not request:
+ request = transaction_block_message.BlockRequestMessage(
+ {'BlockID': blockid})
+ self.forward_message(request, exceptions=exceptions)
+ else:
+ self.forward_message(request,
+ exceptions=exceptions,
+ initialize=False)
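+
+    # Illustrative sketch of the throttle above (times are hypothetical):
+    # RequestedBlocks maps a block id to the earliest time at which another
+    # request may be sent, so with MissingRequestInterval = 30.0:
+    #
+    #     journal.RequestedBlocks['blockid'] = 100.0 + 30.0
+    #     # a call arriving at now = 120.0 returns without resending;
+    #     # a call arriving at now = 131.0 sends a fresh request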
+
+ def request_missing_txn(self, txnid, exceptions=[], request=None):
+ """Requests that neighbors send a transaction.
+
+ This method is called when a block references a transaction
+ that is not currently in the local cache. Only send the request
+ periodically to avoid spamming the network with duplicate requests.
+
+ Args:
+ txnid (str): The identifier of the missing transaction.
+ exceptions (list): Identifiers of nodes we know don't have
+ the block.
+ request (Message): A previously initialized message for
+ sending the request; avoids duplicates.
+ """
+ now = time.time()
+
+ if (txnid in self.RequestedTransactions
+ and now < self.RequestedTransactions[txnid]):
+ return
+ self.RequestedTransactions[txnid] = now + self.MissingRequestInterval
+
+        # if the request for the missing transaction came from another node,
+        # then we need to reuse the request or we'll process multiple copies
+ if not request:
+ request = transaction_message.TransactionRequestMessage(
+ {'TransactionID': txnid})
+ self.forward_message(request, exceptions=exceptions)
+ else:
+ self.forward_message(request,
+ exceptions=exceptions,
+ initialize=False)
+
+ def build_transaction_block(self, force=False):
+ """Builds the next transaction block for the ledger.
+
+ Note:
+ This method will generally be overridden by derived classes.
+
+ Args:
+ force (bool): Whether to force the creation of the
+ initial block.
+ """
+
+ self.onBuildBlock.fire(self, None)
+
+ def handle_advance(self, tblock):
+ """Handles the case where we are attempting to commit a block that
+ advances the current block chain.
+
+ Args:
+ tblock (Transaction.TransactionBlock): A block of
+ transactions to advance.
+ """
+ assert tblock.Status == transaction_block.Status.valid
+
+ self._commitblock(tblock)
+ self.PendingTransactionBlock = self.build_transaction_block()
+
+ def handle_fork(self, tblock):
+ """Handle the case where we are attempting to commit a block
+ that is not connected to the current block chain.
+
+ Args:
+ tblock (Transaction.TransactionBlock): A disconnected block.
+ """
+
+ assert tblock.Status == transaction_block.Status.valid
+
+ logger.info(
+ 'received a disconnected block %s from %s with previous id %s, '
+ 'expecting %s',
+ tblock.Identifier[:8], self._id2name(tblock.OriginatorID),
+ tblock.PreviousBlockID[:8], self.MostRecentCommitedBlockID[:8])
+
+ # First see if the chain rooted in tblock is the one we should use, if
+ # it is not, then we are building on the correct block and nothing
+ # needs to change
+
+ assert self.MostRecentCommitedBlockID != common.NullIdentifier
+ if cmp(tblock, self.MostRecentCommitedBlock) < 0:
+ logger.info('existing chain is the valid one')
+ return
+
+ logger.info('new chain is the valid one, replace the current chain')
+
+ # now find the root of the fork, first handle the common case of
+ # not looking very deeply for the common block, then handle the
+ # expensive case of searching the entire chain
+ forkid = self._findfork(tblock, 5)
+ if not forkid:
+ forkid = self._findfork(tblock, 0)
+
+ assert forkid
+
+ # at this point we have a new chain that is longer than the current
+ # one, need to move the blocks in the current chain that follow the
+ # fork into the orphaned pool and then move the blocks from the new
+        # chain into the committed pool and finally rebuild the global store
+
+        # move the previously committed blocks into the orphaned list
+ self._decommitblockchain(forkid)
+
+        # move the new blocks from the orphaned list to the committed list
+ self._commitblockchain(tblock.Identifier, forkid)
+ self.PendingTransactionBlock = self.build_transaction_block()
+
+ #
+ # UTILITY FUNCTIONS
+ #
+
+ def _handleblock(self, tblock):
+ """
+ Attempt to add a block to the chain.
+ """
+
+ assert tblock.Identifier in self.PendingBlockIDs
+
+ # initialize the state of this block
+ self.BlockStore[tblock.Identifier] = tblock
+
+ # if this block is the genesis block then we can assume that
+ # it meets all criteria for dependent blocks
+ if tblock.PreviousBlockID != common.NullIdentifier:
+ # first test... do we have the previous block, if not then this
+ # block remains incomplete awaiting the arrival of the predecessor
+ pblock = self.BlockStore.get(tblock.PreviousBlockID)
+ if not pblock:
+ self.request_missing_block(tblock.PreviousBlockID)
+ return
+
+ # second test... is the previous block invalid, if so then this
+ # block will never be valid & can be completely removed from
+ # consideration, for now I'm not removing the block from the
+ # block store though we could substitute a check for the previous
+ # block in the invalid block list
+ if pblock.Status == transaction_block.Status.invalid:
+ self.PendingBlockIDs.discard(tblock.Identifier)
+ self.InvalidBlockIDs.add(tblock.Identifier)
+ tblock.Status = transaction_block.Status.invalid
+ self.BlockStore[tblock.Identifier] = tblock
+ return
+
+ # third test... is the previous block complete, if not then
+ # this block cannot be complete and there is nothing else to do
+ # until the missing transactions come in, note that we are not
+ # requesting missing transactions at this point
+ if pblock.Status == transaction_block.Status.incomplete:
+ return
+
+ # fourth test... check for missing transactions in this block, if
+ # any are missing, request them and then save this block for later
+ # processing
+ missing = tblock.missing_transactions(self)
+ if missing:
+ for txnid in missing:
+ self.request_missing_txn(txnid)
+ return
+
+ # at this point we know that the block is complete
+ tblock.Status = transaction_block.Status.complete
+ self.BlockStore[tblock.Identifier] = tblock
+
+ # fifth test... run the checks for a valid block, generally these are
+ # specific to the various transaction families or consensus mechanisms
+ if (not tblock.is_valid(self)
+ or not self.onBlockTest.fire(self, tblock)):
+ logger.debug('block test failed for %s', tblock.Identifier[:8])
+ self.PendingBlockIDs.discard(tblock.Identifier)
+ self.InvalidBlockIDs.add(tblock.Identifier)
+ tblock.Status = transaction_block.Status.invalid
+ self.BlockStore[tblock.Identifier] = tblock
+ return
+
+ # at this point we know that the block is valid
+ tblock.Status = transaction_block.Status.valid
+ tblock.CommitTime = time.time() - self.StartTime
+ tblock.update_transaction_depth(self)
+
+ # time to apply the transactions in the block to get a new state
+ self.GlobalStoreMap.commit_block_store(tblock.Identifier,
+ self._applyblock(tblock))
+ self.BlockStore[tblock.Identifier] = tblock
+
+ # if the previous block was in the frontier set it is no
+ # longer, the new block is definitely in the frontier at
+ # least for the moment, that may change if this block
+ # connects other orphaned blocks
+ self.FrontierBlockIDs.discard(tblock.PreviousBlockID)
+ self.FrontierBlockIDs.add(tblock.Identifier)
+ self.PendingBlockIDs.discard(tblock.Identifier)
+
+ # and now check to see if we should start to use this block as the one
+ # on which we build a new chain
+
+ # handle the easy, common case here where the new block extends the
+ # current chain
+ if tblock.PreviousBlockID == self.MostRecentCommitedBlockID:
+ self.handle_advance(tblock)
+ else:
+ self.handle_fork(tblock)
+
+ self._cleantransactionblocks()
+
+ # last thing is to check the other orphaned blocks to see if this
+ # block connects one to the chain, if a new block is connected to
+ # the chain then we need to go through the entire process again with
+ # the newly connected block
+ blockids = set()
+ for blockid in self.PendingBlockIDs:
+ if self.BlockStore[blockid].PreviousBlockID == tblock.Identifier:
+ blockids.add(blockid)
+
+ for blockid in blockids:
+ self._handleblock(self.BlockStore[blockid])
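+
+    # Illustrative status progression for an arriving block (states from
+    # transaction_block.Status):
+    #
+    #     incomplete --parent present, all txns present--> complete
+    #     complete   --is_valid and onBlockTest pass--> valid (else invalid)
+    #
+    # only valid blocks participate in commit and fork resolution.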
+
+ def _commitblockchain(self, blockid, forkid):
+ """
+        Commit the chain of blocks from the block immediately after the
+        fork point through the head of the chain
+
+ Args:
+ blockid (UUID) -- head of the chain to commit
+ forkid (UUID) -- point where the fork occurred
+ """
+ if blockid == forkid:
+ return
+
+ block = self.BlockStore[blockid]
+ self._commitblockchain(block.PreviousBlockID, forkid)
+ self._commitblock(block)
+
+ def _commitblock(self, tblock):
+ """
+        Add a block to the committed chain; this function extends the
+        chain by updating the most recently committed block field
+
+ Args:
+ tblock (Transaction.TransactionBlock) -- block of transactions to
+                be committed
+ """
+
+ logger.info('commit block %s from %s with previous id %s',
+ tblock.Identifier[:8], self._id2name(tblock.OriginatorID),
+ tblock.PreviousBlockID[:8])
+
+ assert tblock.Status == transaction_block.Status.valid
+
+        # Remove all of the newly committed transactions from the pending
+        # list and mark them as committed
+ for txnid in tblock.TransactionIDs:
+ if txnid in self.PendingTransactions:
+ del self.PendingTransactions[txnid]
+
+ txn = self.TransactionStore[txnid]
+ txn.Status = transaction.Status.committed
+ txn.InBlock = tblock.Identifier
+ self.TransactionStore[txnid] = txn
+
+ # Update the head of the chain
+ self.MostRecentCommitedBlockID = tblock.Identifier
+ self.ChainStore['MostRecentBlockID'] = self.MostRecentCommitedBlockID
+
+ # Update stats
+ self.JournalStats.CommitedBlockCount.increment()
+ self.JournalStats.CommitedTxnCount.increment(len(
+ tblock.TransactionIDs))
+
+ # fire the event handler for block commit
+ self.onCommitBlock.fire(self, tblock)
+
+ def _decommitblockchain(self, forkid):
+ """
+ decommit blocks from the head of the chain through the forked block
+
+ Args:
+ forkid (UUID) -- identifier of the block where the fork occurred
+ """
+ blockid = self.MostRecentCommitedBlockID
+ while blockid != forkid:
+ self._decommitblock()
+ blockid = self.MostRecentCommitedBlockID
+
+ def _decommitblock(self):
+ """
+        Move the head of the block chain from the committed pool to the
+        orphaned
+ pool and move all transactions in the block back into the pending
+ transaction list.
+ """
+ blockid = self.MostRecentCommitedBlockID
+ block = self.BlockStore[blockid]
+ assert block.Status == transaction_block.Status.valid
+
+ # fire the event handler for block decommit
+ self.onDecommitBlock.fire(self, block)
+
+ # move the head of the chain back
+ self.MostRecentCommitedBlockID = block.PreviousBlockID
+ self.ChainStore['MostRecentBlockID'] = self.MostRecentCommitedBlockID
+
+ # this bizarre bit of code is intended to preserve the ordering of
+        # transactions, where all committed transactions occur before pending
+ # transactions
+ pending = OrderedDict()
+ for txnid in block.TransactionIDs:
+ # there is a chance that this block is incomplete and some of the
+ # transactions have not arrived, don't put transactions into
+        # pending if we don't have the transaction
+ txn = self.TransactionStore.get(txnid)
+ if txn:
+ txn.Status = transaction.Status.pending
+ self.TransactionStore[txnid] = txn
+
+ if txn.add_to_pending():
+ pending[txnid] = True
+
+ pending.update(self.PendingTransactions)
+ self.PendingTransactions = pending
+
+ # update stats
+ self.JournalStats.CommitedBlockCount.increment(-1)
+ self.JournalStats.CommitedTxnCount.increment(-len(
+ block.TransactionIDs))
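+
+    # Illustrative sketch of the reordering above (hypothetical ids): if the
+    # decommitted block held t1 and t2 while t3 was already pending, the
+    # merged OrderedDict keeps the decommitted transactions first.
+    #
+    #     pending = OrderedDict([('t1', True), ('t2', True)])
+    #     pending.update(OrderedDict([('t3', True)]))
+    #     list(pending)  # ['t1', 't2', 't3']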
+
+ def _applyblock(self, tblock):
+ """
+ apply transactions to the previous block's global store to create a new
+ version of the store
+
+ Args:
+ tblock (Transaction.TransactionBlock) -- block of transactions to
+ apply
+ Returns:
+ GlobalStore
+ """
+
+ assert tblock.Status == transaction_block.Status.valid
+
+ # make a copy of the store from the previous block, the previous
+ # block must be complete if this block is complete
+ store = self.GlobalStoreMap.get_block_store(
+ tblock.PreviousBlockID).clone_block()
+
+ # apply the transactions
+ try:
+ for txnid in tblock.TransactionIDs:
+ txn = self.TransactionStore[txnid]
+ # When we move InBlock to a set of blocks that contain the
+ # transaction then it should be updated here, for now the
+ # update happens only when one of the blocks containing the
+ # transaction is actually committed
+ # txn.InBlock.add(tblock.Identifier)
+ # self.TransactionStore[txnid] = txn
+ txn.apply(store.get_transaction_store(txn.TransactionTypeName))
+        except Exception:
+ logger.critical(
+ 'failed to apply transactions from completed block %s, this '
+ 'should not happen',
+ tblock.Identifier[:8],
+ exc_info=True)
+
+            # ROLLBACK updates to transaction status!!
+
+ return None
+
+ return store
+
+ def _findfork(self, tblock, depth):
+ """
+ Find most recent predecessor of tblock that is in the committed
+ chain, searching through at most depth blocks
+
+        Args:
+            tblock (Transaction.TransactionBlock) -- block whose committed
+                ancestor is sought
+            depth (int) -- depth in the current chain to search, 0 implies
+                all
+ """
+
+ blockids = set(self.commited_block_ids(depth))
+ forkid = tblock.PreviousBlockID
+ while True:
+ if forkid == common.NullIdentifier or forkid in blockids:
+ return forkid
+
+ assert forkid in self.BlockStore
+ forkid = self.BlockStore[forkid].PreviousBlockID
+
+ return None
+
+ def _preparetransactionlist(self, maxcount=0):
+ """
+ Prepare an ordered list of valid transactions that can be included in
+ the next consensus round
+
+ Returns:
+            list of str -- identifiers of the transactions to include
+ """
+
+ # generate a list of valid transactions to place in the new block
+ addtxns = []
+ deltxns = []
+ store = self.GlobalStore.clone_block()
+ for txnid in self.PendingTransactions.iterkeys():
+ txn = self.TransactionStore[txnid]
+ if txn:
+ self._preparetransaction(addtxns, deltxns, store, txn)
+
+ if maxcount and len(addtxns) >= maxcount:
+ break
+
+        # as part of the process we may identify transactions that are
+        # invalid, so go ahead and get rid of them; since these had all of
+        # their dependencies met, we know that they will never be valid
+ for txnid in deltxns:
+ logger.info('found a transaction that will never apply; %s',
+ txnid[:8])
+ if txnid in self.TransactionStore:
+ del self.TransactionStore[txnid]
+ if txnid in self.PendingTransactions:
+ del self.PendingTransactions[txnid]
+
+ return addtxns
+
+ def _preparetransaction(self, addtxns, deltxns, store, txn):
+ """
+ Determine if a particular transaction is valid
+
+ Args:
+            addtxns (list of str) -- identifiers of transactions to be
+                added to the current block
+            deltxns (list of str) -- identifiers of invalid transactions
+ store (GlobalStore) -- current global store
+ txn -- the transaction to be tested
+ Returns:
+ True if the transaction is valid
+ """
+
+ logger.debug('add transaction %s with id %s', str(txn),
+ txn.Identifier[:8])
+
+ # Because the dependencies may reorder transactions in the block
+ # in a way that is different from the arrival order, this transaction
+ # might already be in the block
+ if txn.Identifier in addtxns:
+ return True
+
+ # First step in adding the transaction to the list is to make
+ # sure that all dependent transactions are in the list already
+ ready = True
+ for dependencyID in txn.Dependencies:
+ logger.debug('check dependency %s of transaction %s',
+ dependencyID[:8], txn.Identifier[:8])
+
+            # check to see if the dependency has already been committed
+ if (dependencyID in self.TransactionStore
+ and (self.TransactionStore[dependencyID].Status ==
+ transaction.Status.committed)):
+ continue
+
+ # check to see if the dependency is already in this block
+ if dependencyID in addtxns:
+ continue
+
+ # check to see if the dependency is among the transactions to be
+ # deleted, if so then this transaction will never be valid and we
+ # can just get rid of it
+ if dependencyID in deltxns:
+ logger.info('transaction %s depends on deleted transaction %s',
+ txn.Identifier[:8], dependencyID[:8])
+ deltxns.append(txn.Identifier)
+ ready = False
+ continue
+
+            # recurse into the dependency; note that we should make sure
+            # there are no loops in the dependencies, but we are not doing
+            # that right now
+ deptxn = self.TransactionStore.get(dependencyID)
+ if deptxn and self._preparetransaction(addtxns, deltxns, store,
+ deptxn):
+ continue
+
+ # at this point we cannot find the dependency so send out a request
+ # for it and wait, we should set a timer on this transaction so we
+ # can just throw it away if the dependencies cannot be met
+ ready = False
+ self.request_missing_txn(dependencyID)
+
+        # if all of the dependencies have not been met, then there isn't any
+        # point in continuing, so bail out
+ if not ready:
+ return False
+
+ # after all that work... we know the dependencies are met, so see if
+ # the transaction is valid, that is that all of the preconditions
+ # encoded in the transaction itself are met
+ txnstore = store.get_transaction_store(txn.TransactionTypeName)
+ if txn.is_valid(txnstore):
+ logger.debug('txn with id %s is valid, adding to block',
+ txn.Identifier[:8])
+ addtxns.append(txn.Identifier)
+ txn.apply(txnstore)
+ return True
+
+ # because we have all of the dependencies but the transaction is still
+ # invalid we know that this transaction is broken and we can simply
+ # throw it away
+ logger.warn(
+ 'transaction %s with id %s is not valid for this block, dropping',
+ str(txn), txn.Identifier[:8])
+ logger.info(common.pretty_print_dict(txn.dump()))
+ deltxns.append(txn.Identifier)
+ return False
+
+ def _cleantransactionblocks(self):
+ """
+        _cleantransactionblocks -- for blocks and transactions that are,
+        with high probability, no longer going to change, clean out the
+        bulk of the memory used to store the blocks and the corresponding
+        transactions
+
+ self.TransactionStore.sync()
+ self.BlockStore.sync()
+
+        # with the state storage, we can flatten old blocks to reduce the
+        # memory footprint; they can always be recovered from persistent
+        # storage later on. however, the flattening process increases memory
+        # usage, so we don't want to do it too often. the code below keeps
+        # the number of blocks kept in memory less than
+        # 2 * self.MaximumBlocksToKeep
+ if self.MostRecentCommitedBlock.BlockNum \
+ % self.MaximumBlocksToKeep == 0:
+ depth = 0
+ blockid = self.MostRecentCommitedBlockID
+ while (blockid != common.NullIdentifier
+ and depth < self.MaximumBlocksToKeep):
+ blockid = self.BlockStore[blockid].PreviousBlockID
+ depth += 1
+
+ if blockid != common.NullIdentifier:
+ logger.debug('flatten storage for block %s', blockid)
+ self.GlobalStoreMap.flatten_block_store(blockid)
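+
+    # Illustrative arithmetic for the flattening above (numbers are
+    # hypothetical): with MaximumBlocksToKeep = 10 the walk runs only when
+    # the head BlockNum is a multiple of 10, stepping back 10 blocks before
+    # flattening, so fewer than 2 * 10 unflattened blocks stay in memory.
+    #
+    #     BlockNum = 20 -> walk 19, 18, ..., 10 -> flatten the store at 10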
+
+ def _initledgerstats(self):
+ self.JournalStats = stats.Stats(self.LocalNode.Name, 'ledger')
+ self.JournalStats.add_metric(stats.Counter('CommitedBlockCount'))
+ self.JournalStats.add_metric(stats.Counter('CommitedTxnCount'))
+ self.JournalStats.add_metric(stats.Sample(
+ 'PendingBlockCount', lambda: self.PendingBlockCount))
+ self.JournalStats.add_metric(stats.Sample(
+ 'PendingTxnCount',
+ lambda: self.PendingTxnCount))
+
+ self.StatDomains['ledger'] = self.JournalStats
+
+ self.JournalConfigStats = stats.Stats(self.LocalNode.Name,
+ 'ledgerconfig')
+ self.JournalConfigStats.add_metric(
+ stats.Sample('MinimumTransactionsPerBlock',
+ lambda: self.MinimumTransactionsPerBlock))
+ self.JournalConfigStats.add_metric(
+ stats.Sample('MaximumTransactionsPerBlock',
+ lambda: self.MaximumTransactionsPerBlock))
+
+ self.StatDomains['ledgerconfig'] = self.JournalConfigStats
+
+ def _id2name(self, nodeid):
+ if nodeid in self.NodeMap:
+ return str(self.NodeMap[nodeid])
+
+ if nodeid == self.LocalNode.Identifier:
+ return str(self.LocalNode)
+
+ store = self.GlobalStore.TransactionStores[
+ '/EndpointRegistryTransaction']
+ if nodeid in store and 'Name' in store[nodeid]:
+ return str(store[nodeid]['Name'])
+
+ return nodeid[:8]
diff --git a/journal/messages/__init__.py b/journal/messages/__init__.py
new file mode 100644
index 0000000000..226d4561fa
--- /dev/null
+++ b/journal/messages/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['journal_debug', 'journal_transfer', 'transaction_block_message',
+ 'transaction_message']
diff --git a/journal/messages/journal_debug.py b/journal/messages/journal_debug.py
new file mode 100644
index 0000000000..e9104ea1dc
--- /dev/null
+++ b/journal/messages/journal_debug.py
@@ -0,0 +1,165 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import time
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Registers the message handlers that every journal should
+ support.
+
+ Args:
+ journal (Journal): The journal to register the message
+ handlers against.
+ """
+ journal.register_message_handler(DumpJournalBlocksMessage,
+ _dumpjournalblockshandler)
+ journal.register_message_handler(DumpJournalValueMessage,
+ _dumpjournalvaluehandler)
+
+
+def _dumpjournalblockshandler(msg, journal):
+ logger.info('dumping block list for %s', journal.LocalNode)
+
+ identifier = "{0}, {1:0.2f}, {2}".format(journal.LocalNode, time.time(),
+ msg.Identifier[:8])
+
+ blockids = journal.commited_block_ids(msg.Count)
+ for blkid in blockids:
+ block = journal.BlockStore[blkid]
+ logger.info('block, %s, %s', identifier, str(block))
+
+
+class DumpJournalBlocksMessage(message.Message):
+ """Dump journal blocks messages represent the message format
+ for exchanging dump journal blocks messages.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ Count (int): The number of journal blocks to dump.
+ """
+ MessageType = "/" + __name__ + "/DumpJournalBlocks"
+
+ def __init__(self, minfo={}):
+ """Constructor for DumpJournalBlocksMessage class.
+
+ Args:
+            minfo (dict): A dict containing initialization values
+ for a DumpJournalBlocksMessage.
+ """
+ super(DumpJournalBlocksMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.Count = minfo.get("Count", 0)
+
+ def dump(self):
+ """Returns a dict containing information about the
+ DumpJournalBlocksMessage.
+
+ Returns:
+ dict: A dict containing information about the dump
+ journal blocks message.
+ """
+ result = super(DumpJournalBlocksMessage, self).dump()
+ result['Count'] = self.Count
+
+ return result
+
+
+def _dumpjournalvaluehandler(msg, journal):
+ key = msg.Name
+ tname = msg.TransactionType
+
+ msgid = msg.Identifier[:8]
+ localnode = journal.LocalNode
+ remotenode = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+
+ logger.debug("received journal lookup for key <%s> from %s", key,
+ remotenode)
+
+ cmn = "{0}, {1}, {2}, {3}".format(localnode, msgid, key, tname)
+
+ # if the current journal contains incomplete blocks then we won't have any
+ # global store
+ if not journal.GlobalStore:
+ logger.info("keylookup, %s, incomplete, 0", cmn)
+ elif tname not in journal.GlobalStore.TransactionStores:
+ logger.info("keylookup, %s, unknown type", cmn)
+ elif key not in journal.GlobalStore.TransactionStores[tname]:
+ logger.info("keylookup, %s, no value, 0", cmn)
+ else:
+ logger.info("keylookup, %s, known, %s", cmn,
+ str(journal.GlobalStore.TransactionStores[tname][key]))
+
+
+class DumpJournalValueMessage(message.Message):
+ """Represents the message format for exchanging dump journal
+ value messages.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+        TransactionType (str): The name of the transaction type.
+        Name (str): The key to look up in the transaction store.
+ """
+ MessageType = "/" + __name__ + "/DumpJournalValue"
+
+ def __init__(self, minfo={}):
+ """Constructor for the DumpJournalValueMessage class.
+
+ Args:
+ minfo (dict): A dict containing initial values for
+ the new DumpJournalValueMessage.
+ """
+ super(DumpJournalValueMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = True
+ self.IsReliable = True
+
+ self.TransactionType = minfo.get('TransactionType')
+ self.Name = minfo.get('Name')
+
+ def dump(self):
+ """Returns a dict containing information about the
+ DumpJournalValueMessage.
+
+ Returns:
+ dict: A dict containing information about the dump
+ journal value message.
+ """
+ result = super(DumpJournalValueMessage, self).dump()
+
+ result['TransactionType'] = self.TransactionType
+ result['Name'] = self.Name
+
+ return result
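+
+# Illustrative sketch (hypothetical values): a debug request for the last
+# five committed blocks is built from a minfo dict, matching what the
+# handlers above expect.
+#
+#     msg = DumpJournalBlocksMessage({'Count': 5})
+#     assert msg.dump()['Count'] == 5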
diff --git a/journal/messages/journal_transfer.py b/journal/messages/journal_transfer.py
new file mode 100644
index 0000000000..69ec832e93
--- /dev/null
+++ b/journal/messages/journal_transfer.py
@@ -0,0 +1,348 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """
+ Register the message handlers that every journal should support.
+ """
+ journal.register_message_handler(BlockListRequestMessage,
+ _blocklistrequesthandler)
+ journal.register_message_handler(UncommitedListRequestMessage,
+ _uncommitedlistrequesthandler)
+ journal.register_message_handler(BlockRequestMessage, _blockrequesthandler)
+ journal.register_message_handler(TransactionRequestMessage,
+ _txnrequesthandler)
+
+
+class BlockListRequestMessage(message.Message):
+ MessageType = "/" + __name__ + "/BlockListRequest"
+
+ def __init__(self, minfo={}):
+ super(BlockListRequestMessage, self).__init__(minfo)
+ self.BlockListIndex = minfo.get('BlockListIndex', 0)
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(BlockListRequestMessage, self).dump()
+ result['BlockListIndex'] = self.BlockListIndex
+ return result
+
+
+def _blocklistrequesthandler(msg, journal):
+ source = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.debug(
+ 'processing incoming blocklist request for journal transfer from %s',
+ source)
+
+ if msg.OriginatorID == journal.LocalNode.Identifier:
+ logger.info('node %s received its own request, ignore',
+ journal.LocalNode.Identifier[:8])
+ return
+
+ if journal.Initializing:
+ src = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.warn(
+ 'received blocklist transfer request from %s prior to completing '
+ 'initialization',
+ src)
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ reply = BlockListReplyMessage()
+ reply.InReplyTo = msg.Identifier
+ reply.BlockListIndex = msg.BlockListIndex
+
+ blockids = journal.commited_block_ids()
+ blockids.reverse()
+
+ index = msg.BlockListIndex
+ if index < len(blockids):
+ reply.BlockIDs = blockids[index:index + 100]
+
+ logger.debug('sending %d committed blocks to %s for request %s',
+ len(reply.BlockIDs), source, msg.Identifier[:8])
+ journal.send_message(reply, msg.OriginatorID)
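+
+# Illustrative sketch of the paging protocol above: the requester starts at
+# index 0 and advances by the number of identifiers returned; an empty
+# reply signals that the committed block list is exhausted.
+#
+#   request = BlockListRequestMessage()
+#   request.BlockListIndex = 0  # advance by len(reply.BlockIDs) each round
+#   journal.send_message(request, peer.Identifier)  # 'peer' is a placeholder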
+
+
+class BlockListReplyMessage(message.Message):
+ MessageType = "/" + __name__ + "/BlockListReply"
+
+ def __init__(self, minfo={}):
+ super(BlockListReplyMessage, self).__init__(minfo)
+
+ self.InReplyTo = minfo.get('InReplyTo')
+ self.BlockListIndex = minfo.get('BlockListIndex', 0)
+ self.BlockIDs = minfo.get('BlockIDs', [])
+ self.UncommitedTxnIDs = minfo.get('UncommitedTxnIDs', [])
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(BlockListReplyMessage, self).dump()
+ result['BlockListIndex'] = self.BlockListIndex
+
+ result['BlockIDs'] = []
+ for blkid in self.BlockIDs:
+ result['BlockIDs'].append(blkid)
+
+ result['UncommitedTxnIDs'] = []
+ for txnid in self.UncommitedTxnIDs:
+ result['UncommitedTxnIDs'].append(txnid)
+
+ result['InReplyTo'] = self.InReplyTo
+ return result
+
+
+class UncommitedListRequestMessage(message.Message):
+ MessageType = "/" + __name__ + "/UncommitedListRequest"
+
+ def __init__(self, minfo={}):
+ super(UncommitedListRequestMessage, self).__init__(minfo)
+ self.TransactionListIndex = minfo.get('TransactionListIndex', 0)
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(UncommitedListRequestMessage, self).dump()
+ result['TransactionListIndex'] = self.TransactionListIndex
+ return result
+
+
+def _uncommitedlistrequesthandler(msg, journal):
+ source = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.debug(
+ 'processing incoming uncommitted list request for journal transfer '
+ 'from %s',
+ source)
+
+ if msg.OriginatorID == journal.LocalNode.Identifier:
+ logger.info('node %s received its own request, ignore',
+ journal.LocalNode.Identifier[:8])
+ return
+
+ if journal.Initializing:
+ src = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.warn(
+ 'received uncommitted list transfer request from %s prior to '
+ 'completing initialization',
+ src)
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ reply = UncommitedListReplyMessage()
+ reply.InReplyTo = msg.Identifier
+ reply.TransactionListIndex = msg.TransactionListIndex
+
+ index = msg.TransactionListIndex
+ txns = journal.PendingTransactions.keys()
+ if index < len(txns):
+ reply.TransactionIDs = txns[index:index + 100]
+
+ logger.debug('sending %d uncommitted txns to %s for request %s',
+ len(reply.TransactionIDs), source, msg.Identifier[:8])
+ journal.send_message(reply, msg.OriginatorID)
+
+
+class UncommitedListReplyMessage(message.Message):
+ MessageType = "/" + __name__ + "/UncommitedListReply"
+
+ def __init__(self, minfo={}):
+ super(UncommitedListReplyMessage, self).__init__(minfo)
+
+ self.InReplyTo = minfo.get('InReplyTo')
+ self.TransactionListIndex = minfo.get('TransactionListIndex', 0)
+ self.TransactionIDs = minfo.get('TransactionIDs', [])
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(UncommitedListReplyMessage, self).dump()
+ result['TransactionListIndex'] = self.TransactionListIndex
+
+ result['TransactionIDs'] = []
+ for txnid in self.TransactionIDs:
+ result['TransactionIDs'].append(txnid)
+
+ result['InReplyTo'] = self.InReplyTo
+ return result
+
+
+class BlockRequestMessage(message.Message):
+ MessageType = "/" + __name__ + "/BlockRequest"
+
+ def __init__(self, minfo={}):
+ super(BlockRequestMessage, self).__init__(minfo)
+ self.BlockID = minfo.get('BlockID')
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(BlockRequestMessage, self).dump()
+ result['BlockID'] = self.BlockID
+
+ return result
+
+
+def _blockrequesthandler(msg, journal):
+ logger.debug('processing incoming block request for journal transfer')
+
+ if journal.Initializing:
+ src = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.warn(
+ 'received block transfer request from %s prior to completing '
+ 'initialization',
+ src)
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ reply = BlockReplyMessage()
+ reply.InReplyTo = msg.Identifier
+ if msg.BlockID in journal.BlockStore:
+ blk = journal.BlockStore[msg.BlockID]
+ bmsg = blk.build_message()
+ reply.TransactionBlockMessage = bmsg.dump()
+ else:
+ logger.warn('request for unknown block, %s', msg.BlockID[:8])
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ journal.send_message(reply, msg.OriginatorID)
+
+
+class BlockReplyMessage(message.Message):
+ MessageType = "/" + __name__ + "/BlockReply"
+
+ def __init__(self, minfo={}):
+ super(BlockReplyMessage, self).__init__(minfo)
+
+ # TransactionBlockMessage is the encapsulated, transaction block
+ # type specific message that lets us handle multiple transaction
+ # block types
+ self.TransactionBlockMessage = minfo.get('TransactionBlockMessage', {})
+ self.InReplyTo = minfo.get('InReplyTo')
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(BlockReplyMessage, self).dump()
+ result['TransactionBlockMessage'] = self.TransactionBlockMessage
+ result['InReplyTo'] = self.InReplyTo
+
+ return result
+
+
+class TransactionRequestMessage(message.Message):
+ MessageType = "/" + __name__ + "/TransactionRequest"
+
+ def __init__(self, minfo={}):
+ super(TransactionRequestMessage, self).__init__(minfo)
+ self.TransactionID = minfo.get('TransactionID', [])
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(TransactionRequestMessage, self).dump()
+ result['TransactionID'] = self.TransactionID
+
+ return result
+
+
+def _txnrequesthandler(msg, journal):
+ logger.debug(
+ 'processing incoming transaction request for journal transfer')
+
+ if journal.Initializing:
+ src = journal.NodeMap.get(msg.OriginatorID, msg.OriginatorID[:8])
+ logger.warn(
+ 'received transaction transfer request from %s prior to '
+ 'completing initialization',
+ src)
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ reply = TransactionReplyMessage()
+ reply.InReplyTo = msg.Identifier
+ if msg.TransactionID in journal.TransactionStore:
+ txn = journal.TransactionStore[msg.TransactionID]
+ tmsg = txn.build_message()
+ reply.TransactionMessage = tmsg.dump()
+ else:
+ logger.warn('request for unknown transaction, %s',
+ msg.TransactionID[:8])
+ journal.send_message(TransferFailedMessage(), msg.OriginatorID)
+ return
+
+ journal.send_message(reply, msg.OriginatorID)
+
+
+class TransactionReplyMessage(message.Message):
+ MessageType = "/" + __name__ + "/TransactionReply"
+
+ def __init__(self, minfo={}):
+ super(TransactionReplyMessage, self).__init__(minfo)
+
+ # TransactionMessage is the encapsulated, transaction-type specific
+ # message that lets us handle multiple transaction types
+ self.TransactionMessage = minfo.get('TransactionMessage', {})
+ self.InReplyTo = minfo.get('InReplyTo')
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(TransactionReplyMessage, self).dump()
+ result['TransactionMessage'] = self.TransactionMessage
+ result['InReplyTo'] = self.InReplyTo
+
+ return result
+
+
+class TransferFailedMessage(message.Message):
+ MessageType = "/" + __name__ + "/TransferFailed"
+
+ def __init__(self, minfo={}):
+ super(TransferFailedMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = True
+ self.IsForward = False
+ self.IsReliable = True
+
+ def dump(self):
+ result = super(TransferFailedMessage, self).dump()
+ return result
diff --git a/journal/messages/transaction_block_message.py b/journal/messages/transaction_block_message.py
new file mode 100644
index 0000000000..3e9e6bff51
--- /dev/null
+++ b/journal/messages/transaction_block_message.py
@@ -0,0 +1,153 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+import logging
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ """Registers the transaction block message handlers with the
+ journal.
+
+ Args:
+ journal (Journal): The journal to register the message
+ handlers against.
+ """
+ journal.register_message_handler(TransactionBlockMessage,
+ transaction_block_message_handler)
+ journal.register_message_handler(BlockRequestMessage, _blkrequesthandler)
+
+
+class TransactionBlockMessage(message.Message):
+ """Transaction block messages represent the message format
+ for exchanging transaction blocks.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ TransactionBlock (TransactionBlock): The block associated
+ with the message.
+ """
+ MessageType = "/" + __name__ + "/TransactionBlock"
+
+ def __init__(self, minfo={}):
+ """Constructor for the TransactionBlockMessage class.
+
+ Args:
+ minfo (dict): A dict of initial values for the new
+ TransactionBlockMessage.
+ """
+ super(TransactionBlockMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+ self.TransactionBlock = None
+
+ def dump(self):
+ """Returns a dict containing information about the
+ transaction block message.
+
+ Returns:
+ dict: A dict containing information about the
+ transaction block message.
+ """
+ result = super(TransactionBlockMessage, self).dump()
+ result['TransactionBlock'] = self.TransactionBlock.dump()
+
+ return result
+
+
+def transaction_block_message_handler(msg, journal):
+ """The function called when the node receives a transaction
+ block message.
+
+ Args:
+ msg (Message): The transaction block message.
+ journal (Journal): The journal.
+ """
+ # if we already have this block then there is no reason to send
+ # it on; be conservative about forwarding messages
+ if not msg.TransactionBlock:
+ logger.warn('transaction block message missing transaction block; %s',
+ msg.MessageType)
+ return
+
+ if msg.TransactionBlock.Identifier in journal.BlockStore:
+ return
+
+ journal.commit_transaction_block(msg.TransactionBlock)
+ journal.forward_message(msg, exceptions=[msg.SenderID], initialize=False)
+
+
+class BlockRequestMessage(message.Message):
+ """Represents the message format for block requests.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ IsSystemMessage (bool): Whether or not this message is
+ a system message.
+ IsForward (bool): Whether or not this message is forwarded.
+ IsReliable (bool): Whether or not this message should
+ use reliable delivery.
+ BlockID (str): The id of the requested block.
+ """
+ MessageType = "/" + __name__ + "/BlockRequest"
+
+ def __init__(self, minfo={}):
+ """Constructor for the BlockRequestMessage class.
+
+ Args:
+ minfo (dict): A dict of initial values for the
+ new BlockRequestMessage.
+ """
+ super(BlockRequestMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.BlockID = minfo.get('BlockID')
+
+ def dump(self):
+ """Returns a dict containing information about the
+ BlockRequestMessage.
+
+ Returns:
+ dict: A dict containing information about the
+ BlockRequestMessage.
+ """
+ result = super(BlockRequestMessage, self).dump()
+ result['BlockID'] = self.BlockID
+
+ return result
+
+
+def _blkrequesthandler(msg, journal):
+ blk = journal.BlockStore.get(msg.BlockID)
+ if blk:
+ reply = blk.build_message()
+ journal.forward_message(reply)
+ return
+
+ journal.request_missing_block(msg.BlockID,
+ exceptions=[msg.SenderID],
+ request=msg)
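+
+
+# Illustrative sketch: a node can solicit a block it learned about from a
+# commit notification; if a peer lacks it too, the handler above forwards
+# the request on while remembering it so the reply can be routed back.
+#
+#   request = BlockRequestMessage({'BlockID': blockid})  # 'blockid' placeholder
+#   journal.forward_message(request)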
diff --git a/journal/messages/transaction_message.py b/journal/messages/transaction_message.py
new file mode 100644
index 0000000000..f22ef6fe83
--- /dev/null
+++ b/journal/messages/transaction_message.py
@@ -0,0 +1,100 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from gossip import message
+
+logger = logging.getLogger(__name__)
+
+
+def register_message_handlers(journal):
+ journal.register_message_handler(TransactionMessage,
+ transaction_message_handler)
+ journal.register_message_handler(TransactionRequestMessage,
+ _txnrequesthandler)
+
+
+class TransactionMessage(message.Message):
+ MessageType = "/" + __name__ + "/Transaction"
+
+ def __init__(self, minfo={}):
+ super(TransactionMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+ self.Transaction = None
+
+ def dump(self):
+ result = super(TransactionMessage, self).dump()
+ result['Transaction'] = self.Transaction.dump()
+
+ return result
+
+
+def transaction_message_handler(msg, journal):
+ # if we already have this transaction then there is no reason to
+ # send it on; be conservative about forwarding messages
+ if not msg.Transaction:
+ logger.warn('transaction message missing transaction; %s',
+ msg.MessageType)
+ return
+
+ logger.debug('handle transaction message with identifier %s',
+ msg.Transaction.Identifier)
+
+ if journal.TransactionStore.get(msg.Transaction.Identifier):
+ return
+
+ if journal.PendingTransactions.get(msg.Transaction.Identifier):
+ return
+
+ journal.add_pending_transaction(msg.Transaction)
+ journal.forward_message(msg, exceptions=[msg.SenderID], initialize=False)
+
+
+class TransactionRequestMessage(message.Message):
+ MessageType = "/" + __name__ + "/TransactionRequest"
+
+ def __init__(self, minfo={}):
+ super(TransactionRequestMessage, self).__init__(minfo)
+
+ self.IsSystemMessage = False
+ self.IsForward = False
+ self.IsReliable = True
+
+ self.TransactionID = minfo.get('TransactionID')
+
+ def dump(self):
+ result = super(TransactionRequestMessage, self).dump()
+ result['TransactionID'] = self.TransactionID
+
+ return result
+
+
+def _txnrequesthandler(msg, journal):
+ # a transaction might be in the committed transaction list only as a
+ # placeholder, so we have to make sure that it is there and that it is not
+ # None
+ txn = journal.TransactionStore.get(msg.TransactionID)
+ if txn:
+ reply = txn.build_message()
+ journal.forward_message(reply)
+ return
+
+ journal.request_missing_txn(msg.TransactionID,
+ exceptions=[msg.SenderID],
+ request=msg)
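+
+
+# Illustrative sketch: a locally created transaction is wrapped in its
+# message type and gossiped; peers that already hold it simply drop the
+# message rather than forwarding it again.
+#
+#   msg = txn.build_message()     # 'txn' is a placeholder Transaction
+#   journal.forward_message(msg)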
diff --git a/journal/protocol/__init__.py b/journal/protocol/__init__.py
new file mode 100644
index 0000000000..f10a97580a
--- /dev/null
+++ b/journal/protocol/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['journal_transfer']
diff --git a/journal/protocol/journal_transfer.py b/journal/protocol/journal_transfer.py
new file mode 100644
index 0000000000..16f77e202e
--- /dev/null
+++ b/journal/protocol/journal_transfer.py
@@ -0,0 +1,321 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import random
+import sys
+import traceback
+from collections import OrderedDict
+
+from twisted.internet import reactor
+
+from journal.messages.journal_transfer import BlockListReplyMessage
+from journal.messages.journal_transfer import BlockListRequestMessage
+from journal.messages.journal_transfer import BlockReplyMessage
+from journal.messages.journal_transfer import BlockRequestMessage
+from journal.messages.journal_transfer import TransactionReplyMessage
+from journal.messages.journal_transfer import TransactionRequestMessage
+from journal.messages.journal_transfer import TransferFailedMessage
+from journal.messages.journal_transfer import UncommitedListReplyMessage
+from journal.messages.journal_transfer import UncommitedListRequestMessage
+
+logger = logging.getLogger(__name__)
+
+
+def start_journal_transfer(journal, oncomplete):
+ """Initiates journal transfer to peers.
+
+ Args:
+ journal (Journal): The journal to transfer.
+ oncomplete (function): The function to call when the
+ journal transfer has completed.
+
+ Returns:
+ bool: Whether or not a journal transfer was initiated.
+ """
+ # if there are no peers then something has gone wrong, or else we
+ # are the first node on the network
+ if len(journal.peer_list()) == 0:
+ logger.warn('no peers found for journal transfer')
+ return False
+
+ transfer = JournalTransfer(journal, oncomplete)
+ transfer.initiate_journal_transfer()
+
+ return True
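+
+# Illustrative usage sketch: a node catching up at startup runs the transfer
+# and resumes normal operation from the callback; 'on_complete' is a
+# placeholder name.
+#
+#   def on_complete():
+#       logger.info('journal transfer complete')
+#
+#   start_journal_transfer(journal, on_complete)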
+
+
+class JournalTransfer(object):
+ """Handles the transfer of a journal to peers.
+
+ Attributes:
+ Journal (Journal): The local journal to populate.
+ Callback (function): The function to call when the
+ journal transfer has completed.
+ """
+ def __init__(self, journal, callback):
+ """Constructor for the JournalTransfer class.
+
+ Args:
+ journal (Journal): The local journal to populate.
+ callback (function): The function to call when
+ the journal transfer has completed.
+ """
+ self.Journal = journal
+ self.Callback = callback
+
+ def initiate_journal_transfer(self):
+ """Initiates journal transfer to peers.
+ """
+ self.Peer = random.choice(self.Journal.peer_list())
+ logger.info('initiate journal transfer from %s', self.Peer)
+
+ self.BlockMap = OrderedDict()
+ self.PendingBlocks = []
+ self.TransactionMap = OrderedDict()
+ self.PendingTransactions = []
+
+ self.ProcessingUncommited = False
+ self.UncommitedTransactions = []
+
+ self.Journal.register_message_handler(BlockListReplyMessage,
+ self._blocklistreplyhandler)
+ self.Journal.register_message_handler(BlockReplyMessage,
+ self._blockreplyhandler)
+ self.Journal.register_message_handler(UncommitedListReplyMessage,
+ self._txnlistreplyhandler)
+ self.Journal.register_message_handler(TransactionReplyMessage,
+ self._txnreplyhandler)
+ self.Journal.register_message_handler(TransferFailedMessage,
+ self._failedhandler)
+
+ request = BlockListRequestMessage()
+ request.BlockListIndex = 0
+ self.Journal.send_message(request, self.Peer.Identifier)
+
+ def _failedhandler(self, msg, journal):
+ logger.warn('journal transfer failed')
+
+ # clear all of the message handlers
+ self.Journal.clear_message_handler(BlockListReplyMessage)
+ self.Journal.clear_message_handler(BlockReplyMessage)
+ self.Journal.clear_message_handler(UncommitedListReplyMessage)
+ self.Journal.clear_message_handler(TransactionReplyMessage)
+ self.Journal.clear_message_handler(TransferFailedMessage)
+
+ self.RetryID = reactor.callLater(10, self.initiate_journal_transfer)
+
+ def _kick_off_next_block(self):
+ """
+ Check to see if there are any blocks to be received and kick off
+ retrieval of the first one that doesn't already exist in our journal
+ """
+ while len(self.PendingBlocks) > 0:
+ blockid = self.PendingBlocks.pop(0)
+ if blockid not in self.Journal.BlockStore:
+ request = BlockRequestMessage()
+ request.BlockID = blockid
+ self.Journal.send_message(request, self.Peer.Identifier)
+ return True
+
+ # copy the block information
+ self.BlockMap[blockid] = self.Journal.BlockStore[blockid]
+
+ # add all the transactions to the transaction map in order
+ for txnid in self.BlockMap[blockid].TransactionIDs:
+ self.TransactionMap[txnid] = None
+ self.PendingTransactions.append(txnid)
+
+ # there were no blocks left to fetch, but we may have queued
+ # transactions above, so kick off a request for the next one
+ return self._kick_off_next_transaction()
+
+ def _kick_off_next_transaction(self):
+ """
+ Check to see if there are any transactions to be received and kick off
+ retrieval of the first one that doesnt already exist in our journal
+ """
+ while len(self.PendingTransactions) > 0:
+ txnid = self.PendingTransactions.pop(0)
+ if txnid not in self.Journal.TransactionStore:
+ request = TransactionRequestMessage()
+ request.TransactionID = txnid
+ self.Journal.send_message(request, self.Peer.Identifier)
+ return True
+
+ self.TransactionMap[txnid] = self.Journal.TransactionStore[txnid]
+
+ return False
+
+ def _blocklistreplyhandler(self, msg, journal):
+ logger.debug('request %s, received %d block identifiers from %s',
+ msg.InReplyTo[:8], len(msg.BlockIDs), self.Peer.Name)
+
+ # add all the blocks to the block map in order
+ for blockid in msg.BlockIDs:
+ self.BlockMap[blockid] = None
+ self.PendingBlocks.append(blockid)
+
+ # if we received any block ids at all then we need to go back and
+ # ask for more; when no more are returned, we know we have them all
+ if len(msg.BlockIDs) > 0:
+ request = BlockListRequestMessage()
+ request.BlockListIndex = msg.BlockListIndex + len(msg.BlockIDs)
+ self.Journal.send_message(request, self.Peer.Identifier)
+ return
+
+ # no more block list messages, now start grabbing blocks
+ if self._kick_off_next_block():
+ return
+
+ # kick off retrieval of the uncommitted transactions
+ request = UncommitedListRequestMessage()
+ request.TransactionListIndex = 0
+ self.Journal.send_message(request, self.Peer.Identifier)
+
+ def _txnlistreplyhandler(self, msg, journal):
+ logger.debug('request %s, received %d uncommitted transactions from %s',
+ msg.InReplyTo[:8], len(msg.TransactionIDs),
+ self.Peer.Name)
+
+ # save the uncommitted transactions
+ for txnid in msg.TransactionIDs:
+ self.UncommitedTransactions.append(txnid)
+
+ if len(msg.TransactionIDs) > 0:
+ request = UncommitedListRequestMessage()
+ request.TransactionListIndex = msg.TransactionListIndex + len(
+ msg.TransactionIDs)
+ self.Journal.send_message(request, self.Peer.Identifier)
+ return
+
+ # if there are no more transactions, then get the next block
+ if self._kick_off_next_block():
+ return
+
+ self._handleuncommited()
+
+ def _blockreplyhandler(self, msg, journal):
+ # leaving this as info to provide some feedback in the log for
+ # ongoing progress on the journal transfer
+ logger.info('request %s, received block from %s', msg.InReplyTo[:8],
+ self.Peer.Name)
+
+ # the actual transaction block is encapsulated in a message within the
+ # reply message so we need to decode it here... this is mostly to make
+ # sure we have the handle to the gossiper for decoding
+ btype = msg.TransactionBlockMessage['__TYPE__']
+ bmessage = self.Journal.unpack_message(btype,
+ msg.TransactionBlockMessage)
+
+ self.BlockMap[
+ bmessage.TransactionBlock.Identifier] = bmessage.TransactionBlock
+
+ # add all the transactions to the transaction map in order
+ for txnid in bmessage.TransactionBlock.TransactionIDs:
+ self.TransactionMap[txnid] = None
+ self.PendingTransactions.append(txnid)
+
+ # check to see if there are any transactions
+ if self._kick_off_next_transaction():
+ return
+
+ # and if there are no more transactions then
+ # check to see if there are more blocks
+ if self._kick_off_next_block():
+ return
+
+ self._handleuncommited()
+
+ def _txnreplyhandler(self, msg, journal):
+ logger.debug('request %s, received transaction from %s',
+ msg.InReplyTo[:8], self.Peer.Name)
+
+ # the actual transaction is encapsulated in a message within the reply
+ # message so we need to decode it here... this is mostly to make sure
+ # we have the handle to the gossiper for decoding
+ ttype = msg.TransactionMessage['__TYPE__']
+ tmessage = self.Journal.unpack_message(ttype, msg.TransactionMessage)
+
+ self.TransactionMap[
+ tmessage.Transaction.Identifier] = tmessage.Transaction
+
+ # if there are more transactions pending for this block, then kick
+ # off retrieval of the next one
+ if self._kick_off_next_transaction():
+ return
+
+ # finished the last block, now it's time to start the next one;
+ # send a request for it
+ if self._kick_off_next_block():
+ return
+
+ self._handleuncommited()
+
+ def _handleuncommited(self):
+ logger.debug('transition to uncommitted messages')
+
+ if not self.ProcessingUncommited and len(
+ self.UncommitedTransactions) > 0:
+ self.ProcessingUncommited = True
+
+ for txnid in self.UncommitedTransactions:
+ self.TransactionMap[txnid] = None
+ self.PendingTransactions.append(txnid)
+
+ # now kick off the retrieval of the first transaction
+ if self._kick_off_next_transaction():
+ return
+
+ self._finish()
+
+ def _finish(self):
+ # everything has been returned... time to update the journal,
+ # first copy the transactions over and apply them to the
+ # global store, then copy the blocks in
+
+ try:
+ for txnid, txn in self.TransactionMap.iteritems():
+ self.Journal.add_pending_transaction(txn)
+
+ for blkid, blk in self.BlockMap.iteritems():
+ self.Journal.commit_transaction_block(blk)
+
+ except AssertionError:
+ (etype, evalue, trace) = sys.exc_info()
+ tbinfo = traceback.extract_tb(trace)
+ (filename, line, func, text) = tbinfo[-1]
+ logger.error('assertion failure in file %s at line %s', filename,
+ line)
+ except Exception:
+ logger.error(
+ 'unexpected error while committing blocks during journal '
+ 'transfer; %s',
+ str(sys.exc_info()[0]))
+
+ logger.info(
+ 'journal transferred from %s, %d transactions, %d blocks, current '
+ 'head is %s',
+ self.Peer, len(self.TransactionMap), len(self.BlockMap),
+ self.Journal.MostRecentCommitedBlockID[:8])
+
+ # clear all of the message handlers
+ self.Journal.clear_message_handler(BlockListReplyMessage)
+ self.Journal.clear_message_handler(BlockReplyMessage)
+ self.Journal.clear_message_handler(UncommitedListReplyMessage)
+ self.Journal.clear_message_handler(TransactionReplyMessage)
+ self.Journal.clear_message_handler(TransferFailedMessage)
+
+ # self.RetryID.cancel()
+ self.Callback()
diff --git a/journal/transaction.py b/journal/transaction.py
new file mode 100644
index 0000000000..59f352cfba
--- /dev/null
+++ b/journal/transaction.py
@@ -0,0 +1,145 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+import time
+
+from gossip import signed_object
+from journal.messages import transaction_message
+
+logger = logging.getLogger(__name__)
+
+
+class SerializationError(Exception):
+ """Exception raised for errors serializing or deserializing
+ transaction messages.
+
+ Attributes:
+ TransactionType (str): The transaction type in which the
+ error occurred.
+ Message (str): The explanation of the error.
+ """
+
+ def __init__(self, txntype, msg):
+ """Constructor for SerializationError class.
+
+ Args:
+ txntype (str): The transaction type in which the
+ error occurred.
+ msg (str): The explanation of the error.
+ """
+ self.TransactionType = txntype
+ self.Message = msg
+
+ def __str__(self):
+ return "Serialization error in class {0}: {1}".format(
+ self.TransactionType, self.Message)
+
+
+class Status(object):
+ """Enumeration for status.
+ """
+ unknown = 0
+ pending = 1
+ committed = 2
+ failed = 3
+
+
+class Transaction(signed_object.SignedObject):
+ """A Transaction is a set of updates to be applied atomically
+ to a ledger.
+
+ It has a unique identifier and a signature to validate the source.
+
+ Note:
+ The OriginatorID in a transaction is the verifying key of an
+ individual, not of a Node as is the case for the signer of a
+ message.
+
+ Attributes:
+ TransactionTypeName (str): The name of the transaction type.
+ MessageType (type): The transaction class.
+ Nonce (float): A locally unique value, defaulting to the
+ current time.
+ Status (Status): The status of the transaction.
+ Dependencies (list): A list of transactions that this transaction
+ is dependent on.
+ """
+
+ TransactionTypeName = '/Transaction'
+ MessageType = transaction_message.TransactionMessage
+
+ def __init__(self, minfo={}):
+ """Constructor for the Transaction class.
+
+ Args:
+ minfo (dict): A dict of key/value pairs for the transaction.
+ """
+ super(Transaction, self).__init__(minfo)
+
+ self.Nonce = minfo.get('Nonce', time.time())
+
+ self.Status = Status.unknown
+ self.InBlock = None
+
+ self.Dependencies = []
+ for txnid in minfo.get('Dependencies', []):
+ self.Dependencies.append(str(txnid))
+
+ self._data = None
+
+ def __str__(self):
+ # subclasses that carry updates are expected to define Updates
+ return " and ".join(map(lambda u: str(u), self.Updates))
+
+ def apply(self, store):
+ pass
+
+ def add_to_pending(self):
+ """Predicate to note that a transaction should be added to pending
+ transactions.
+
+ In general incentive transactions should not be included in
+ the pending transaction list.
+
+ Returns:
+ bool: True.
+ """
+ return True
+
+ def build_message(self):
+ """Constructs a message containing the transaction.
+
+ Returns:
+ Message: A transaction message containing the
+ transaction.
+ """
+ msg = self.MessageType()
+ msg.Transaction = self
+ return msg
+
+ def dump(self):
+ """Builds a dict containing information about the transaction.
+
+ Returns:
+ dict: A dict containing details about the transaction.
+ """
+ result = super(Transaction, self).dump()
+
+ result['TransactionType'] = self.TransactionTypeName
+ result['Nonce'] = self.Nonce
+
+ result['Dependencies'] = []
+ for txnid in self.Dependencies:
+ result['Dependencies'].append(str(txnid))
+
+ return result
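+
+
+# Illustrative sketch: concrete transaction types subclass Transaction,
+# carry an Updates list, and override is_valid()/apply(); the hypothetical
+# subclass below only shows the shape (see ledger/transaction/integer_key.py
+# in this changeset for a real one).
+#
+#   class ExampleTransaction(Transaction):
+#       TransactionTypeName = '/ExampleTransaction'
+#
+#       def apply(self, store):
+#           store['example'] = self.Nonce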
diff --git a/journal/transaction_block.py b/journal/transaction_block.py
new file mode 100644
index 0000000000..778e9f5c53
--- /dev/null
+++ b/journal/transaction_block.py
@@ -0,0 +1,172 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from gossip import common, signed_object
+from journal.messages import transaction_block_message
+
+logger = logging.getLogger(__name__)
+
+
+class Status(object):
+ """Enumeration for status.
+
+ Captures the various states of the block:
+ incomplete -- some transactions might be missing
+ complete -- all transactions present, not confirmed
+ valid -- all transactions present, confirmed valid
+ invalid -- all transactions present, confirmed invalid
+ """
+ incomplete = 0
+ complete = 1
+ valid = 2
+ invalid = 3
+
+
+class TransactionBlock(signed_object.SignedObject):
+ """A Transaction Block is a set of transactions to be applied to
+ a ledger.
+
+ Attributes:
+ TransactionBlockTypeName (str): The name of the transaction
+ block type.
+ MessageType (type): The transaction block message class.
+ BlockNum (int): The number of the block.
+ PreviousBlockID (str): The ID of the previous block.
+ TransactionIDs (list): A list of transaction IDs on this block.
+ Status (Status): The status of the block.
+ TransactionDepth (int): The cumulative number of transactions
+ in this block and all of its predecessors.
+ """
+ TransactionBlockTypeName = "/TransactionBlock"
+ MessageType = transaction_block_message.TransactionBlockMessage
+
+ def __init__(self, minfo={}):
+ """Constructor for the TransactionBlock class.
+
+ Args:
+ minfo (dict): A dict of values for initializing
+ TransactionBlocks.
+ """
+ super(TransactionBlock, self).__init__(minfo)
+
+ self.BlockNum = minfo.get('BlockNum', 0)
+ self.PreviousBlockID = minfo.get('PreviousBlockID',
+ common.NullIdentifier)
+ self.TransactionIDs = []
+
+ if 'TransactionIDs' in minfo:
+ for txnid in minfo['TransactionIDs']:
+ self.TransactionIDs.append(str(txnid))
+
+ self.CommitTime = 0
+ self.Status = Status.incomplete
+ self.TransactionDepth = 0
+
+ def __str__(self):
+ return "{0}, {1}, {2}, {3:0.2f}".format(
+ self.BlockNum, self.Identifier[:8], len(self.TransactionIDs),
+ self.CommitTime)
+
+ def __cmp__(self, other):
+ """
+ Compare two blocks; this raises a ValueError unless
+ both blocks are valid.
+ """
+ if self.Status != Status.valid:
+ raise ValueError('block {0} must be valid for comparison'.format(
+ self.Identifier))
+
+ if other.Status != Status.valid:
+ raise ValueError('block {0} must be valid for comparison'.format(
+ other.Identifier))
+
+ if self.TransactionDepth < other.TransactionDepth:
+ return -1
+ elif self.TransactionDepth > other.TransactionDepth:
+ return 1
+ else:
+ return cmp(self.Identifier, other.Identifier)
+
+ def is_valid(self, journal):
+ """Verify that the block received is valid.
+
+ For now this simply verifies that the signature is correct.
+
+ Args:
+ journal (journal.Journal): Journal for pulling context.
+ """
+ return super(TransactionBlock, self).is_valid(None)
+
+ def missing_transactions(self, journal):
+ """Verify that all the transaction references in the block exist
+ in the transaction store and request any that are missing.
+
+ Args:
+ journal (journal.Journal): Journal for pulling context.
+
+ Returns:
+ list: A list of missing transactions.
+ """
+ missing = []
+ for txnid in self.TransactionIDs:
+ if txnid not in journal.TransactionStore:
+ missing.append(txnid)
+
+ return missing
+
+ def update_transaction_depth(self, journal):
+ """Compute the depth of transactions.
+
+ Args:
+ journal (journal.Journal): Journal for pulling context.
+ """
+ assert self.Status == Status.valid
+ self.TransactionDepth = len(self.TransactionIDs)
+
+ if self.PreviousBlockID != common.NullIdentifier:
+ assert self.PreviousBlockID in journal.BlockStore
+ self.TransactionDepth += journal.BlockStore[
+ self.PreviousBlockID].TransactionDepth
+
+ def build_message(self):
+ """Constructs a message containing the transaction block.
+
+ Returns:
+ Message: A transaction block message containing the
+ transaction block.
+ """
+ msg = self.MessageType()
+ msg.TransactionBlock = self
+ return msg
+
+ def dump(self):
+ """Builds a dict containing information about the transaction
+ block.
+
+ Returns:
+ dict: A dict containing details about the transaction block.
+ """
+ result = super(TransactionBlock, self).dump()
+
+ result['BlockNum'] = self.BlockNum
+ result['PreviousBlockID'] = self.PreviousBlockID
+ result['TransactionBlockType'] = self.TransactionBlockTypeName
+
+ result['TransactionIDs'] = []
+ for txnid in self.TransactionIDs:
+ result['TransactionIDs'].append(str(txnid))
+
+ return result
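+
+
+# Illustrative sketch: since __cmp__ orders valid blocks by cumulative
+# TransactionDepth (ties broken by identifier), choosing the preferred
+# chain head among fully validated candidates reduces to max().
+#
+#   head = max(candidate_blocks)  # every candidate must have Status.valid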
diff --git a/ledger/__init__.py b/ledger/__init__.py
new file mode 100644
index 0000000000..8dc77c4edc
--- /dev/null
+++ b/ledger/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = []
diff --git a/ledger/transaction/__init__.py b/ledger/transaction/__init__.py
new file mode 100644
index 0000000000..8758b8d682
--- /dev/null
+++ b/ledger/transaction/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+__all__ = ['asset_registry', 'endpoint_registry', 'integer_key']
diff --git a/ledger/transaction/asset_registry.py b/ledger/transaction/asset_registry.py
new file mode 100644
index 0000000000..c6cfea19b3
--- /dev/null
+++ b/ledger/transaction/asset_registry.py
@@ -0,0 +1,221 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+import logging
+
+from gossip import common
+from journal import transaction, global_store_manager
+from journal.messages import transaction_message
+
+logger = logging.getLogger(__name__)
+
+
+def register_transaction_types(ledger):
+ """Registers the asset registry transaction types on the ledger.
+
+ Args:
+ ledger (journal.journal_core.Journal): The ledger to register
+ the transaction type against.
+ """
+ ledger.register_message_handler(
+ AssetRegistryTransactionMessage,
+ transaction_message.transaction_message_handler)
+ ledger.add_transaction_store(AssetRegistryTransaction)
+
+
+class AssetRegistryTransactionMessage(transaction_message.TransactionMessage):
+ """Asset registry transaction messages represent asset registry
+ transactions.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Transaction (AssetRegistryTransaction): The transaction the
+ message is associated with.
+ """
+ MessageType = "/" + __name__ + "/Transaction"
+
+ def __init__(self, minfo={}):
+ """Constructor for the AssetRegistryTransactionMessage class.
+
+ Args:
+ minfo (dict): Dictionary of values for message fields.
+ """
+ super(AssetRegistryTransactionMessage, self).__init__(minfo)
+
+ tinfo = minfo.get('Transaction', {})
+ self.Transaction = AssetRegistryTransaction(tinfo)
+
+
+class Update(object):
+ """Updates represent potential changes to the asset registry.
+
+ Attributes:
+ KnownVerbs (list): A list of possible update actions.
+ Verb (str): The action of this update, defaults to 'reg'.
+ AssetID (str): The asset ID to be updated.
+ OwnerID (str): The ID of the owner of the asset.
+ """
+ KnownVerbs = ['reg', 'own', 'unr']
+
+ def __init__(self, minfo={}):
+ """Constructor for Update class.
+
+ Args:
+ minfo (dict): Dictionary of values for update fields.
+ """
+ self.Verb = minfo.get('Verb', 'reg')
+ self.AssetID = minfo.get('AssetID', common.NullIdentifier)
+ self.OwnerID = minfo.get('OwnerID', common.NullIdentifier)
+
+ def __str__(self):
+ return "({0} {1} {2})".format(self.Verb, self.AssetID[:8],
+ self.OwnerID[:8])
+
+ def is_valid(self, store):
+ """Determines if the update is valid.
+
+ is_valid() checks to see if the specified operation is valid
+ in the context of the asset provided. For example, it is not
+ valid to register an asset that already exists.
+
+ Args:
+ store (dict): Transaction store mapping.
+
+ Returns:
+ bool: Whether or not the update action is valid.
+ """
+ logger.debug('check update %s', str(self))
+
+ # check for an attempt to register an asset that already exists
+ if self.Verb == 'reg' and self.AssetID in store:
+ return False
+
+ # check for an attempt to change owner of a non-existent asset
+ if self.Verb == 'own' and self.AssetID not in store:
+ return False
+
+ # check for an attempt to unregister a non-existent asset
+ if self.Verb == 'unr' and self.AssetID not in store:
+ return False
+
+ return True
+
+ def apply(self, store):
+ """Applies the update to the asset in the transaction store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ logger.debug('apply %s', str(self))
+
+ if self.Verb == 'reg' or self.Verb == 'own':
+ store[self.AssetID] = self.OwnerID
+ elif self.Verb == 'unr':
+ store[self.AssetID] = common.NullIdentifier
+ else:
+ logger.info('unknown verb %s', self.Verb)
+
+ def dump(self):
+ """Returns a dict with attributes from the update object.
+
+ Returns:
+ dict: The asset ID, owner ID, and verb from the update object.
+ """
+ result = {
+ 'AssetID': self.AssetID,
+ 'OwnerID': self.OwnerID,
+ 'Verb': self.Verb
+ }
+ return result
+
+
+class AssetRegistryTransaction(transaction.Transaction):
+ """A Transaction is a set of updates to be applied atomically
+ to a ledger.
+
+ It has a unique identifier and a signature to validate the source.
+
+ Attributes:
+ TransactionTypeName (str): The name of the asset registry
+ transaction type.
+ TransactionStoreType (type): The type of transaction store.
+ MessageType (type): The object type of the message associated
+ with this transaction.
+ Updates (list): A list of asset registry updates associated
+ with this transaction.
+ """
+
+ TransactionTypeName = '/AssetRegistryTransaction'
+ TransactionStoreType = global_store_manager.KeyValueStore
+ MessageType = AssetRegistryTransactionMessage
+
+ def __init__(self, minfo={}):
+ """Constructor for the AssetRegistryTransaction class.
+
+ Args:
+ minfo (dict): Dictionary of values for transaction fields.
+ """
+ super(AssetRegistryTransaction, self).__init__(minfo)
+
+ self.Updates = []
+
+ if 'Updates' in minfo:
+ for update in minfo['Updates']:
+ self.Updates.append(Update(update))
+
+ def __str__(self):
+ return " and ".join(map(lambda u: str(u), self.Updates))
+
+ def is_valid(self, store):
+ """Determines if the transaction is valid.
+
+ Args:
+ store (dict): Transaction store mapping.
+
+ Returns:
+ bool: Whether or not the transaction is valid.
+ """
+ if not super(AssetRegistryTransaction, self).is_valid(store):
+ return False
+
+ for update in self.Updates:
+ if not update.is_valid(store):
+ logger.debug('invalid transaction: %s', str(update))
+ return False
+
+ return True
+
+ def apply(self, store):
+ """Applies all the updates in the transaction to the asset
+ in the transaction store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ for update in self.Updates:
+ update.apply(store)
+
+ def dump(self):
+ """Returns a dict with attributes from the transaction object.
+
+ Returns:
+ dict: The updates from the transaction object.
+ """
+ result = super(AssetRegistryTransaction, self).dump()
+
+ result['Updates'] = []
+ for update in self.Updates:
+ result['Updates'].append(update.dump())
+
+ return result
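+
+
+# Illustrative sketch: updates are validated against the store as it exists
+# before the transaction is applied, so registering an asset and then
+# transferring it must happen in separate transactions. Identifiers below
+# are placeholders and the transaction must be signed for is_valid() to pass.
+#
+#   regtxn = AssetRegistryTransaction({'Updates': [
+#       {'Verb': 'reg', 'AssetID': asset_id, 'OwnerID': alice_id}]})
+#   if regtxn.is_valid(store):
+#       regtxn.apply(store)        # store[asset_id] == alice_id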
diff --git a/ledger/transaction/endpoint_registry.py b/ledger/transaction/endpoint_registry.py
new file mode 100644
index 0000000000..88da78facb
--- /dev/null
+++ b/ledger/transaction/endpoint_registry.py
@@ -0,0 +1,264 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from journal import transaction, global_store_manager
+from journal.messages import transaction_message
+
+logger = logging.getLogger(__name__)
+
+
+def register_transaction_types(ledger):
+ """Registers the endpoint registry asset types on the ledger.
+
+ Args:
+ ledger (journal.journal_core.Journal): The ledger to register
+ the transaction type against.
+ """
+ ledger.register_message_handler(
+ EndpointRegistryTransactionMessage,
+ transaction_message.transaction_message_handler)
+ ledger.add_transaction_store(EndpointRegistryTransaction)
+
+
+class EndpointRegistryTransactionMessage(
+ transaction_message.TransactionMessage):
+ """Endpoint registry transaction messages represent endpoint registry
+ transactions.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Transaction (EndpointRegistryTransaction): The transaction the
+ message is associated with.
+ """
+ MessageType = "/" + __name__ + "/Transaction"
+
+ def __init__(self, minfo={}):
+ super(EndpointRegistryTransactionMessage, self).__init__(minfo)
+
+ tinfo = minfo.get('Transaction', {})
+ self.Transaction = EndpointRegistryTransaction(tinfo)
+
+
+class Update(object):
+ """Updates represent potential changes to the endpoint registry.
+
+ Attributes:
+ KnownVerbs (list): A list of possible update actions.
+ Verb (str): The action of this update, defaults to 'reg'.
+ Domain (str): The domain of the endpoint.
+ Name (str): The name of the endpoint.
+ NodeIdentifier (str): The identifier of the endpoint.
+ NetHost (str): The hostname or IP address of the endpoint.
+ NetPort (int): The port number of the endpoint.
+ """
+ KnownVerbs = ['reg', 'unr']
+
+ @staticmethod
+ def create_from_node(node, domain='/'):
+ """Creates a new Update object based on the attributes of a
+ node.
+
+ Args:
+ node (Node): The node to create an endpoint registry update
+ object based on.
+ domain (str): The domain of the endpoint.
+
+ Returns:
+ Update: An update object for registering the node's details.
+ """
+ update = Update()
+
+ update.Verb = 'reg'
+ update.Domain = domain
+ update.Name = node.Name
+ update.NodeIdentifier = node.Identifier
+ update.NetHost = node.NetHost
+ update.NetPort = node.NetPort
+
+ return update
+
+ def __init__(self, minfo={}):
+ """Constructor for Update class.
+
+ Args:
+ minfo (dict): Dictionary of values for update fields.
+ """
+ self.Verb = minfo.get('Verb', 'reg')
+
+ self.Domain = minfo.get('Domain', '/')
+ self.Name = minfo.get('Name', 'unknown')
+ self.NodeIdentifier = minfo.get('NodeIdentifier', '')
+ self.NetHost = minfo.get('NetHost', '0.0.0.0')
+ self.NetPort = minfo.get('NetPort', 0)
+
+ def __str__(self):
+ return "({0} {1} {2} {3} {4}:{5})".format(
+ self.Verb, self.NodeIdentifier, self.Name, self.Domain,
+ self.NetHost, self.NetPort)
+
+ def is_valid(self, store, originatorid):
+ """Determines if the update is valid.
+
+ Args:
+ store (dict): Transaction store mapping.
+ originatorid (str): Node identifier of transaction originator.
+ """
+ logger.debug('check update %s from %s', str(self), originatorid)
+
+ # if the endpoint exists then the node must be the same as the
+ # transaction originator
+ if (self.NodeIdentifier in store
+ and self.NodeIdentifier != originatorid):
+ return False
+
+ # check for an attempt to unregister a non-existent endpoint
+ if self.Verb == 'unr' and self.NodeIdentifier not in store:
+ return False
+
+ return True
+
+ def apply(self, store):
+ """Applies the update to the asset in the transaction store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ logger.debug('apply %s', str(self))
+
+ if self.Verb == 'reg':
+ store[self.NodeIdentifier] = {
+ 'Name': self.Name,
+ 'Domain': self.Domain,
+ 'NodeIdentifier': self.NodeIdentifier,
+ 'Host': self.NetHost,
+ 'Port': self.NetPort
+ }
+ elif self.Verb == 'unr':
+ del store[self.NodeIdentifier]
+ else:
+ logger.info('unknown verb %s', self.Verb)
+
+ def dump(self):
+ """Returns a dict with attributes from the update object.
+
+ Returns:
+ dict: A dictionary containing attributes from the update
+ object.
+ """
+ result = {
+ 'Verb': self.Verb,
+ 'Domain': self.Domain,
+ 'Name': self.Name,
+ 'NodeIdentifier': self.NodeIdentifier,
+ 'NetHost': self.NetHost,
+ 'NetPort': self.NetPort
+ }
+ return result
+
+
+class EndpointRegistryTransaction(transaction.Transaction):
+ """A Transaction is a set of updates to be applied atomically
+ to a ledger.
+
+ It has a unique identifier and a signature to validate the source.
+
+ Attributes:
+ TransactionTypeName (str): The name of the endpoint registry
+ transaction type.
+ TransactionStoreType (type): The type of the transaction store.
+ MessageType (type): The object type of the message associated
+ with this transaction.
+ Updates (list): A list of endpoint registry updates associated
+ with this transaction.
+ """
+ TransactionTypeName = '/EndpointRegistryTransaction'
+ TransactionStoreType = global_store_manager.KeyValueStore
+ MessageType = EndpointRegistryTransactionMessage
+
+ @staticmethod
+ def create_from_node(node, domain='/'):
+ """Creates a new EndpointRegistryTransaction object based on
+ the attributes of a node.
+
+ Args:
+ node (Node): The node to create an endpoint registry update
+ object based on.
+ domain (str): The domain of the endpoint.
+
+ Returns:
+ EndpointRegistryTransaction: A transaction containing an
+ update that registers the node's details.
+ """
+ regtxn = EndpointRegistryTransaction()
+ regtxn.Updates.append(Update.create_from_node(node, domain))
+
+ return regtxn
+
+ def __init__(self, minfo={}):
+ super(EndpointRegistryTransaction, self).__init__(minfo)
+
+ self.Updates = []
+
+ if 'Updates' in minfo:
+ for update in minfo['Updates']:
+ self.Updates.append(Update(update))
+
+ def __str__(self):
+ return " and ".join(map(lambda u: str(u), self.Updates))
+
+ def is_valid(self, store):
+ """Determines if the transaction is valid.
+
+ Args:
+ store (dict): Transaction store mapping.
+
+ Returns:
+ bool: Whether or not the transaction is valid.
+ """
+ if not super(EndpointRegistryTransaction, self).is_valid(store):
+ return False
+
+ for update in self.Updates:
+ if not update.is_valid(store, self.OriginatorID):
+ logger.debug('invalid transaction: %s', str(update))
+ return False
+
+ return True
+
+ def apply(self, store):
+ """Applies all the updates in the transaction to the endpoint
+ in the transaction store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ for update in self.Updates:
+ update.apply(store)
+
+ def dump(self):
+ """Returns a dict with attributes from the transaction object.
+
+ Returns:
+ dict: The updates from the transaction object.
+ """
+ result = super(EndpointRegistryTransaction, self).dump()
+
+ result['Updates'] = []
+ for update in self.Updates:
+ result['Updates'].append(update.dump())
+
+ return result
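+
+
+# Illustrative sketch: a node announcing itself builds the registration
+# transaction directly from its own attributes; 'node' is a placeholder
+# gossip node and the transaction must still be signed before sending.
+#
+#   regtxn = EndpointRegistryTransaction.create_from_node(node, domain='/')
+#   msg = regtxn.build_message()
+#   journal.forward_message(msg)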
diff --git a/ledger/transaction/integer_key.py b/ledger/transaction/integer_key.py
new file mode 100644
index 0000000000..0c85fc0f1f
--- /dev/null
+++ b/ledger/transaction/integer_key.py
@@ -0,0 +1,215 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import logging
+
+from journal import transaction, global_store_manager
+from journal.messages import transaction_message
+
+logger = logging.getLogger(__name__)
+
+
+def register_transaction_types(ledger):
+ """Registers the integer key transaction types on the ledger.
+
+ Args:
+ ledger (journal.journal_core.Journal): The ledger to register
+ the transaction type against.
+ """
+ ledger.register_message_handler(
+ IntegerKeyTransactionMessage,
+ transaction_message.transaction_message_handler)
+ ledger.add_transaction_store(IntegerKeyTransaction)
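+
+# A wiring sketch (illustrative; 'ledger' is assumed to be a
+# journal.journal_core.Journal instance created elsewhere):
+#
+#     from ledger.transaction import integer_key
+#     integer_key.register_transaction_types(ledger)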
+
+
+class IntegerKeyTransactionMessage(transaction_message.TransactionMessage):
+ """Integer key transaction message represent integer key transactions.
+
+ Attributes:
+ MessageType (str): The class name of the message.
+ Transaction (IntegerKeyTransaction): The transaction the
+ message is associated with.
+ """
+ MessageType = "/" + __name__ + "/Transaction"
+
+    def __init__(self, minfo=None):
+        if minfo is None:
+            minfo = {}
+        super(IntegerKeyTransactionMessage, self).__init__(minfo)
+
+        tinfo = minfo.get('Transaction', {})
+        self.Transaction = IntegerKeyTransaction(tinfo)
+
+
+class Update(object):
+ """Updates represent potential changes to the integer key registry.
+
+ Attributes:
+ KnownVerbs (list): A list of possible update actions.
+ Verb (str): The action of this update, defaults to 'set'.
+ Name (str): The name of the integer key.
+ Value (int): The value of the integer key.
+ """
+ KnownVerbs = ['set', 'inc', 'dec']
+
+    def __init__(self, minfo=None):
+        """Constructor for the Update class.
+
+        Args:
+            minfo (dict): Dictionary of values for update fields.
+        """
+        if minfo is None:
+            minfo = {}
+        self.Verb = minfo.get('Verb', 'set')
+        self.Name = minfo.get('Name')
+        self.Value = long(minfo.get('Value', 0))
+
+ def __str__(self):
+ return "({0} {1} {2})".format(self.Verb, self.Name, self.Value)
+
+ def is_valid(self, store):
+ """Determines if the update is valid.
+
+ Args:
+ store (dict): Transaction store mapping.
+
+ Returns:
+ bool: Whether or not the update is valid.
+ """
+ logger.debug('check update %s', str(self))
+
+        # in theory, the name should have been checked before the
+        # transaction was submitted; reject missing or empty names here
+        if not self.Name:
+            return False
+
+        # likewise, the value should have been checked on submission;
+        # reject anything that is not an integer
+        if not isinstance(self.Value, (int, long)):
+            return False
+
+        # a 'set' is only valid for a name not yet in the store, and only
+        # with a non-negative value
+        if self.Verb == 'set' and self.Name not in store and self.Value >= 0:
+            return True
+
+        # an 'inc' is valid for any name already in the store
+        if self.Verb == 'inc' and self.Name in store:
+            return True
+
+ # value after a decrement operation must remain above zero
+ if (self.Verb == 'dec' and self.Name in store
+ and store[self.Name] > self.Value):
+ return True
+
+ return False
+
+ def apply(self, store):
+ """Applies the update to the asset in the transaction store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ logger.debug('apply %s', str(self))
+
+ if self.Verb == 'set':
+ store[self.Name] = self.Value
+ elif self.Verb == 'inc':
+ store[self.Name] += self.Value
+ elif self.Verb == 'dec':
+ store[self.Name] -= self.Value
+ else:
+ logger.info('unknown verb %s', self.Verb)
+
+ def dump(self):
+ """Returns a dict with attributes from the update object.
+
+ Returns:
+ dict: The name, value, and verb from the update object.
+ """
+ result = {'Name': self.Name, 'Value': self.Value, 'Verb': self.Verb}
+ return result
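+
+    # Verb semantics at a glance (an illustrative sketch; 'store' is any
+    # dict-like transaction store):
+    #
+    #     store = {}
+    #     Update({'Verb': 'set', 'Name': 'counter', 'Value': 5}).apply(store)
+    #     Update({'Verb': 'inc', 'Name': 'counter', 'Value': 2}).apply(store)
+    #     Update({'Verb': 'dec', 'Name': 'counter', 'Value': 3}).apply(store)
+    #     assert store['counter'] == 4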
+
+
+class IntegerKeyTransaction(transaction.Transaction):
+ """A Transaction is a set of updates to be applied atomically
+ to a ledger.
+
+ It has a unique identifier and a signature to validate the source.
+
+ Attributes:
+ TransactionTypeName (str): The name of the integer key
+ transaction type.
+        TransactionStoreType (type): The type of the transaction store.
+ MessageType (type): The object type of the message associated
+ with this transaction.
+ Updates (list): A list of integer key registry updates associated
+ with this transaction.
+ """
+ TransactionTypeName = '/IntegerKeyTransaction'
+ TransactionStoreType = global_store_manager.KeyValueStore
+ MessageType = IntegerKeyTransactionMessage
+
+    def __init__(self, minfo=None):
+        """Constructor for the IntegerKeyTransaction class.
+
+        Args:
+            minfo (dict): Dictionary of values for transaction fields.
+        """
+        if minfo is None:
+            minfo = {}
+        super(IntegerKeyTransaction, self).__init__(minfo)
+
+        self.Updates = []
+
+        for update in minfo.get('Updates', []):
+            self.Updates.append(Update(update))
+
+    def __str__(self):
+        return " and ".join(str(u) for u in self.Updates)
+
+ def is_valid(self, store):
+ """Determines if the transaction is valid.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ if not super(IntegerKeyTransaction, self).is_valid(store):
+ return False
+
+ for update in self.Updates:
+ if not update.is_valid(store):
+ logger.debug('invalid transaction: %s', str(update))
+ return False
+
+ return True
+
+ def apply(self, store):
+ """Applies all the updates in the transaction to the transaction
+ store.
+
+ Args:
+ store (dict): Transaction store mapping.
+ """
+ for update in self.Updates:
+ update.apply(store)
+
+ def dump(self):
+ """Returns a dict with attributes from the transaction object.
+
+ Returns:
+ dict: The updates from the transaction object.
+ """
+ result = super(IntegerKeyTransaction, self).dump()
+
+ result['Updates'] = []
+ for update in self.Updates:
+ result['Updates'].append(update.dump())
+
+ return result
diff --git a/nose2.cfg b/nose2.cfg
new file mode 100644
index 0000000000..45e3d0d848
--- /dev/null
+++ b/nose2.cfg
@@ -0,0 +1,13 @@
+[unittest]
+start-dir = tests
+code-directories = ..
+test-file-pattern = test_*.py
+plugins = nose2.plugins.coverage
+ nose2.plugins.junitxml
+
+[coverage]
+always-on = True
+coverage-report = html
+
+[junit-xml]
+always-on = True
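+
+# With this configuration, running `nose2` from the repository root should
+# write an HTML coverage report to htmlcov/ and JUnit XML to
+# nose2-junit.xml (the plugins' default locations).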
diff --git a/packaging/rpm-install-script b/packaging/rpm-install-script
new file mode 100755
index 0000000000..2de59d924e
--- /dev/null
+++ b/packaging/rpm-install-script
@@ -0,0 +1,12 @@
+#!/bin/bash
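+# Used as the bdist_rpm install-script (see setup.cfg): installs into the
+# rpm build root, strips the .py sources so the package ships only compiled
+# bytecode, and filters INSTALLED_FILES to match.
+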
+python setup.py install \
+ --single-version-externally-managed \
+ -O1 \
+ --root=$RPM_BUILD_ROOT \
+ --record=INSTALLED_FILES
+
+find $RPM_BUILD_ROOT -name \*.py -exec rm {} \;
+
+grep -v '\.py$' INSTALLED_FILES > INSTALLED_FILES.new
+mv INSTALLED_FILES.new INSTALLED_FILES
+
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000000..cdf7f67757
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,9 @@
+
+[bdist_rpm]
+requires = python-colorlog
+ python-Twisted
+ cbor
+ pybitcointools
+
+install-script = packaging/rpm-install-script
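+
+# An rpm can then be produced with the stock distutils command (sketch):
+#
+#     python setup.py bdist_rpm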
+
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000000..78bd2600d8
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,148 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import os
+import shutil
+import subprocess
+import sys
+
+from setuptools import setup, Extension, find_packages
+
+
+def bump_version(version):
+ (major, minor, patch) = version.split('.')
+ patch = str(int(patch) + 1)
+ return ".".join([major, minor, patch])
+
+
+def auto_version(default, strict):
+ output = subprocess.check_output(['git', 'describe', '--dirty'])
+ parts = output.strip().split('-', 1)
+ parts[0] = parts[0][1:] # strip the leading 'v'
+ if len(parts) == 2:
+ parts[0] = bump_version(parts[0])
+ if default != parts[0]:
+ msg = "setup.py and (bumped?) git describe versions differ: {} != {}"\
+ .format(default, parts[0])
+ if strict:
+ print >> sys.stderr, "ERROR: " + msg
+ sys.exit(1)
+ else:
+ print >> sys.stderr, "WARNING: " + msg
+ print >> sys.stderr, "WARNING: using setup.py version {}".format(
+ default)
+ parts[0] = default
+
+ if len(parts) == 2:
+ return "-git".join([parts[0], parts[1].replace("-", ".")])
+ else:
+ return parts[0]
+
+
+def version(default):
+ if 'VERSION' in os.environ:
+ if os.environ['VERSION'] == 'AUTO_STRICT':
+ version = auto_version(default, strict=True)
+ elif os.environ['VERSION'] == 'AUTO':
+ version = auto_version(default, strict=False)
+ else:
+ version = os.environ['VERSION']
+ else:
+ version = default + "-dev1"
+ return version
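+
+
+# How the version resolves (an illustrative sketch; AUTO output depends on
+# local git tags, shown here for a repo whose newest tag is v1.0.0 with
+# three commits on top of it):
+#
+#   VERSION unset        -> "1.0.1-dev1" (the setup.py default plus -dev1)
+#   VERSION=2.0.0        -> "2.0.0" (used verbatim)
+#   VERSION=AUTO         -> "1.0.1-git3.gabc1234" (patch-bumped tag plus the
+#                           git describe suffix; a mismatch only warns)
+#   VERSION=AUTO_STRICT  -> same, but a mismatch with setup.py aborts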
+
+
+if os.name == 'nt':
+ extra_compile_args = ['/EHsc']
+ libraries = ['json-c', 'cryptopp-static']
+ include_dirs = ['deps/include', 'deps/include/cryptopp']
+ library_dirs = ['deps/lib']
+
+else:
+ extra_compile_args = ['-std=c++11']
+ libraries = ['json-c', 'cryptopp']
+ include_dirs = []
+ library_dirs = []
+
+enclavemod = Extension(
+ '_poet_enclave_simulator',
+ ['journal/consensus/poet/poet_enclave_simulator/poet_enclave_simulator.i',
+ 'journal/consensus/poet/poet_enclave_simulator/common.cpp',
+ 'journal/consensus/poet/poet_enclave_simulator/wait_certificate.cpp',
+ 'journal/consensus/poet/poet_enclave_simulator/wait_timer.cpp'],
+ swig_opts=['-c++'],
+ extra_compile_args=extra_compile_args,
+ include_dirs=include_dirs,
+ libraries=libraries,
+ library_dirs=library_dirs)
+
+
+ecdsamod = Extension('_ECDSARecoverModule',
+ ['gossip/ECDSA/ECDSARecoverModule.i',
+ 'gossip/ECDSA/ECDSARecover.cc'],
+ swig_opts=['-c++'],
+ extra_compile_args=extra_compile_args,
+ include_dirs=include_dirs,
+ libraries=libraries,
+ library_dirs=library_dirs)
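+
+# Both extension modules are generated from SWIG interface (.i) files. For
+# local development, an in-place build is the usual flow (a sketch, assuming
+# swig and the C++ dependencies above are installed):
+#
+#     python setup.py build_ext --inplace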
+
+setup(name='sawtooth-core',
+ version=version('1.0.1'),
+ description='Intel Labs Distributed ledger testbed',
+ author='Mic Bowman, Intel Labs',
+ author_email='mic.bowman@intel.com',
+ url='http://www.intel.com',
+ packages=find_packages(),
+ install_requires=['cbor>=0.1.23', 'colorlog', 'pybitcointools',
+ 'twisted', 'enum'],
+ ext_modules=[enclavemod, ecdsamod],
+ py_modules=['journal.consensus.poet.poet_enclave_simulator'
+ '.poet_enclave_simulator',
+ 'gossip.ECDSA.ECDSARecoverModule'])
+
+if "clean" in sys.argv and "--all" in sys.argv:
+ directory = os.path.dirname(os.path.realpath(__file__))
+ for root, fn_dir, files in os.walk(directory):
+ for fn in files:
+ if fn.endswith(".pyc"):
+ os.remove(os.path.join(root, fn))
+ for filename in [
+ ".coverage",
+ "_ECDSARecoverModule.so",
+ os.path.join("gossip", "ECDSA", "ECDSARecoverModule.py"),
+ os.path.join("gossip", "ECDSA", "ECDSARecoverModule_wrap.cpp"),
+ "_poet_enclave_simulator.so",
+ os.path.join("journal",
+ "consensus",
+ "poet",
+ "poet_enclave_simulator",
+ "poet_enclave_simulator.py"),
+ os.path.join("journal",
+ "consensus",
+ "poet",
+ "poet_enclave_simulator",
+ "_poet_enclave_simulator_wrap.cpp"),
+ "nose2-junit.xml"]:
+ if os.path.exists(os.path.join(directory, filename)):
+ os.remove(os.path.join(directory, filename))
+ shutil.rmtree(os.path.join(directory, "build"), ignore_errors=True)
+ shutil.rmtree(os.path.join(directory, "htmlcov"), ignore_errors=True)
+ shutil.rmtree(os.path.join(directory, "deb_dist"), ignore_errors=True)
+ shutil.rmtree(os.path.join(directory, "doc", "code"), ignore_errors=True)
+ shutil.rmtree(os.path.join(directory, "doc", "_build"),
+ ignore_errors=True)
+ shutil.rmtree(
+ os.path.join(directory, "SawtoothLakeLedger.egg-info"),
+ ignore_errors=True)
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000000..9729d09705
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import argparse
+import os
+import tempfile
+import unittest
+
+from gossip.config import AggregateConfig
+from gossip.config import ArgparseOptionsConfig
+from gossip.config import Config
+from gossip.config import EnvConfig
+from gossip.config import JsonConfig
+from gossip.config import JsonFileConfig
+
+
+class TestEnvConfig(unittest.TestCase):
+ def test_load_from_env_notset(self):
+ """Verifies that a configuration variable will not be set if
+ it is missing from os.environ."""
+
+ os.environ.clear()
+ cfg = EnvConfig([("TEST_VAR", "test_env_var")])
+ self.assertIsNotNone(cfg)
+ self.assertNotIn("test_env_var", cfg)
+
+ def test_load_from_env_set(self):
+ """Verifies that a configuration variable will be set correctly if
+ it is present in os.environ."""
+
+ os.environ.clear()
+ os.environ["TEST_VAR"] = "set"
+ cfg = EnvConfig([("TEST_VAR", "test_env_var")])
+ self.assertIn("test_env_var", cfg)
+ self.assertEquals(cfg["test_env_var"], "set")
+
+
+class TestJsonishConfig(unittest.TestCase):
+ def test_load_from_jsonish_file(self):
+ """Verifies that we can load and retrieve a variable from a file
+ when loading with a filename."""
+
+ filename = None
+ try:
+ with tempfile.NamedTemporaryFile(delete=False) as f:
+ filename = f.name
+ f.write('{ "TestVar": "test_value" }\n')
+ cfg = JsonFileConfig(filename)
+ finally:
+ if filename is not None:
+ os.unlink(filename)
+
+ self.assertIn("TestVar", cfg)
+ self.assertEquals(cfg["TestVar"], "test_value")
+
+ def test_load_from_jsonish_no_filename(self):
+ """Verifies that we can use JsonConfig without specifying a
+ filename."""
+
+ cfg = JsonConfig(['{ "TestVar": "test_value" }'])
+
+ self.assertIn("TestVar", cfg)
+ self.assertEquals(cfg["TestVar"], "test_value")
+
+
+class TestArgparseOptionsConfig(unittest.TestCase):
+ def test_argparse_options_config(self):
+ """Verifies that an option set via the command line is in the
+ config and that an unset option is not."""
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument('--cli-option')
+ parser.add_argument('--unset')
+ options = parser.parse_args(['--cli-option=value'])
+
+ cfg = ArgparseOptionsConfig(
+ [
+ ("cli_option", "CliOption"), ("unset", "UnsetOption")
+ ], options)
+
+ self.assertIn("CliOption", cfg)
+ self.assertNotIn("UnsetOption", cfg)
+ self.assertEquals(cfg["CliOption"], "value")
+
+
+class TestAggregateConfig(unittest.TestCase):
+ def test_aggregate_config(self):
+ """Test that resolution of values and sources operate as expected."""
+
+ config1 = Config(source="config1name")
+ config1["keya"] = "value1"
+ config1["keyb"] = "value1"
+
+ config2 = Config(source="config2name")
+ config2["keyb"] = "value2"
+ config2["keyc"] = "value2"
+
+ config3 = Config(source="config3name")
+ config3["keyc"] = "value3"
+
+ multi = AggregateConfig([config1, config2, config3])
+
+ self.assertEquals(multi["keya"], "value1")
+ self.assertEquals(multi["keyb"], "value2")
+ self.assertEquals(multi["keyc"], "value3")
+
+ self.assertEquals(multi.get_source("keya"), "config1name")
+ self.assertEquals(multi.get_source("keyb"), "config2name")
+ self.assertEquals(multi.get_source("keyc"), "config3name")
+
+
+class TestConfig(unittest.TestCase):
+ def test_config_resolve(self):
+ """Test that resolution of substituted values operate as expected.
+
+ Tests the recursion of values, and that circular dependencies
+ break in the expected manner."""
+
+ cfg = Config()
+ cfg["keya"] = "value1"
+ cfg["keyb"] = "{A}"
+ cfg["keyc"] = "{B}"
+ cfg["keyd"] = "{C}"
+ cfg["keye"] = "{D}"
+ cfg["keyf"] = "{E}"
+ cfg["keyg"] = "{F}"
+ cfg["keyh"] = "{G}"
+ cfg["keyi"] = "{H}"
+ cfg["circ1"] = "{c2}"
+ cfg["circ2"] = "{c1}"
+ cfg["circular"] = "{circular}"
+ cfg["list"] = ["should", "be", "ignored"]
+
+ resolved = cfg.resolve({
+ "A": "keya",
+ "B": "keyb",
+ "C": "keyc",
+ "D": "keyd",
+ "E": "keye",
+ "F": "keyf",
+ "G": "keyg",
+ "H": "keyh",
+ "c1": "circ1",
+ "c2": "circ2",
+ "circular": "circular",
+ "undef": "undef",
+ })
+
+ self.assertEquals(resolved["keyb"], "value1")
+ self.assertEquals(resolved["keyc"], "value1")
+ self.assertEquals(resolved["keyd"], "value1")
+ self.assertEquals(resolved["keye"], "value1")
+ self.assertEquals(resolved["keyf"], "value1")
+ self.assertEquals(resolved["keyg"], "value1")
+ self.assertEquals(resolved["keyh"], "value1")
+ self.assertEquals(resolved["keyi"], "value1")
+ self.assertIn(resolved["circ1"], ["{c1}", "{c2}"])
+ self.assertIn(resolved["circ2"], ["{c1}", "{c2}"])
+ self.assertEquals(resolved["circular"], "{circular}")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_ecdsa.py b/tests/test_ecdsa.py
new file mode 100644
index 0000000000..5f2467dd6d
--- /dev/null
+++ b/tests/test_ecdsa.py
@@ -0,0 +1,107 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import unittest
+
+import pybitcointools as pbt
+
+import gossip.signed_object
+import gossip.ECDSA.ECDSARecoverModule as ecnative
+
+
+class TestPKRecover(unittest.TestCase):
+ def test_pbt_match(self):
+ """
+ Tests matching results between pybitcointools and native
+ ECDSA key recovery
+ """
+ # This key has a small public key value which tests padding
+ wifstr = '5JtMb6tmM9vT6QHyM7RR8pjMViqccukgMFNCPvG5xhLVf6CMoGx'
+ priv = pbt.decode_privkey(wifstr, 'wif')
+ msg = 'foo'
+ sig = pbt.ecdsa_sign(msg, priv)
+ native_recovered = gossip.signed_object.get_verifying_key(msg, sig)
+ py_recovered = pbt.ecdsa_recover(msg, sig)
+ self.assertEquals(native_recovered, py_recovered)
+
+ def test_bulk_keymatch(self):
+ """
+ Tests key recovery over several keys
+ """
+ msg = 'foo'
+ self.longMessage = True
+        for _ in range(20):
+ priv = pbt.random_key()
+ sig = pbt.ecdsa_sign(msg, priv)
+ native_recovered = gossip.signed_object.get_verifying_key(msg, sig)
+ py_recovered = pbt.ecdsa_recover(msg, sig)
+ self.assertEquals(native_recovered, py_recovered,
+ "Priv Key that failed: {}".format(priv))
+
+ def test_exception_on_empty_param(self):
+ """
+ Tests Exception Handling
+ Passes an empty string as an invalid argument to the native method
+ """
+ d = pbt.sha256('private key')
+ msghash = pbt.electrum_sig_hash('test message')
+ z = pbt.hash_to_int(msghash)
+ v, r, s = pbt.ecdsa_raw_sign(msghash, d)
+ yBit = v - 27
+ with self.assertRaises(ValueError) as context:
+ result = ecnative.recoverPubKeyFromSig(
+ str(z), str(""), str(s),
+ int(yBit))
+
+ self.assertTrue('Empty string' in str(context.exception))
+
+ def test_exception_on_bad_sig(self):
+ """
+ Tests Exception Handling
+ Inputs an invalid number to the native method
+ """
+ d = pbt.sha256('private key')
+ msghash = pbt.electrum_sig_hash('test message')
+ z = pbt.hash_to_int(msghash)
+ v, r, s = pbt.ecdsa_raw_sign(msghash, d)
+ yBit = v - 27
+ badval = "58995174607243353628346858794753620798088291196940745194" \
+ "58148184192713284575299999999999999h"
+ with self.assertRaises(ValueError) as context:
+ result = ecnative.recoverPubKeyFromSig(
+ str(z), str(badval), str(s),
+ int(yBit))
+
+ self.assertTrue('Invalid signature' in str(context.exception))
+
+ def test_exception_on_bad_hash(self):
+ """
+ Tests Exception Handling
+ Inputs an invalid (negative) hash value to the native method
+ """
+ d = pbt.sha256('private key')
+ msghash = pbt.electrum_sig_hash('test message')
+ z = -pbt.hash_to_int(msghash)
+ v, r, s = pbt.ecdsa_raw_sign(msghash, d)
+ yBit = v - 27
+ with self.assertRaises(ValueError) as context:
+ result = ecnative.recoverPubKeyFromSig(
+ str(z), str(r), str(s),
+ int(yBit))
+
+ self.assertTrue('hash' in str(context.exception))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_event_handler.py b/tests/test_event_handler.py
new file mode 100644
index 0000000000..5e917472a5
--- /dev/null
+++ b/tests/test_event_handler.py
@@ -0,0 +1,36 @@
+# Copyright 2016 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ------------------------------------------------------------------------------
+
+import unittest
+
+from gossip.event_handler import EventHandler
+
+
+class TestEventHandler(unittest.TestCase):
+    def call1(self, ival):
+        return ival < 5
+
+    def call2(self, ival):
+        return ival < 10
+
+    def test_event_handler(self):
+        ev = EventHandler("test")
+        ev += self.call1
+        ev += self.call2
+
+        self.assertTrue(ev.fire(2))
+        self.assertFalse(ev.fire(7))
+        self.assertFalse(ev.fire(12))
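+
+
+# As exercised above, callbacks are registered on an EventHandler with +=,
+# and fire() reports True only when every registered callback accepts the
+# value (2 passes both bounds, 7 fails call1, 12 fails both).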