diff --git a/.gitignore b/.gitignore index f589146..601e44d 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ *.iml *.ipr *.iws +*.log build-idea/ out/ diff --git a/application-20220203.log b/application-20220203.log deleted file mode 100644 index 703f77a..0000000 --- a/application-20220203.log +++ /dev/null @@ -1,506 +0,0 @@ -2022-02-03 10:00:38.632 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 10:00:38.639 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 10:00:38.639 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 10:00:38.642 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 10:00:38.643 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 10:00:38.643 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 10:00:38.644 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 10:00:38.645 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 10:00:38.646 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @67b9b51a -2022-02-03 10:00:38.648 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 10:00:38.648 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 10:00:38.649 [main] DEBUG io.netty.util.internal.PlatformDependent 
- maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 10:00:38.649 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 10:00:38.650 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 10:00:38.651 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 10:00:38.651 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 10:00:38.652 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 10:00:38.653 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 10:00:38.653 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 10:00:38.669 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 10:00:38.707 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 10:00:38.708 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 10:00:38.715 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 10:00:38.716 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 10:00:38.722 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 10:00:38.736 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:00:38.768 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 10:00:38.768 [main] 
DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 10:00:38.771 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 10:00:38.772 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 10:00:38.816 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:00:38.840 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 93433 (auto-detected) -2022-02-03 10:00:38.843 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 10:00:38.843 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 10:00:38.847 
[main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 10:00:38.849 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 10:00:38.852 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 10:00:38.879 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 10:00:38.879 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 10:00:38.879 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 10:00:38.904 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 10:00:38.906 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 10:00:38.907 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:00:38.908 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:00:38.909 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 10:00:38.910 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 10:23:18.255 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 10:23:18.263 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 10:23:18.264 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 10:23:18.266 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 10:23:18.267 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: 
available -2022-02-03 10:23:18.268 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 10:23:18.269 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 10:23:18.269 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 10:23:18.270 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @31920ade -2022-02-03 10:23:18.272 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 10:23:18.272 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 10:23:18.273 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 10:23:18.273 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 10:23:18.273 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 10:23:18.275 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 10:23:18.276 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 10:23:18.276 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 10:23:18.278 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 10:23:18.278 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: 
false -2022-02-03 10:23:18.318 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 10:23:18.325 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 10:23:18.325 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 10:23:18.325 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 10:23:18.328 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 10:23:18.329 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 10:23:18.329 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 10:23:18.332 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:23:18.332 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 10:23:18.333 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 10:23:18.333 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-03 10:23:18.334 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 10:23:18.334 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue 
size [1k] -2022-02-03 10:23:18.334 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 10:23:18.335 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 10:23:18.335 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 10:23:18.336 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 10:23:18.336 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:23:18.336 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 10:23:18.344 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 10:23:18.365 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 10:23:18.365 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 10:23:18.374 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 10:23:18.374 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 10:23:18.381 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 10:23:18.391 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:23:18.415 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: 
simple -2022-02-03 10:23:18.415 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 10:23:18.417 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 10:23:18.417 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 10:23:18.417 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 10:23:18.418 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 10:23:18.446 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:23:18.466 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 98817 (auto-detected) -2022-02-03 10:23:18.467 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 10:23:18.467 [main] DEBUG io.netty.util.NetUtil - 
-Djava.net.preferIPv6Addresses: false -2022-02-03 10:23:18.470 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 10:23:18.470 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 10:23:18.472 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 10:23:18.491 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 10:23:18.491 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 10:23:18.491 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 10:23:18.508 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 10:23:18.509 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 10:23:18.511 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:23:18.512 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:23:18.513 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 10:23:18.514 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 10:30:37.916 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 10:30:37.938 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 10:30:37.940 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 10:30:37.943 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 10:30:37.944 [main] DEBUG 
io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 10:30:37.945 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 10:30:37.947 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 10:30:37.951 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 10:30:37.955 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @31920ade -2022-02-03 10:30:37.958 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 10:30:37.960 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 10:30:37.962 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 10:30:37.962 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 10:30:37.963 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 10:30:37.964 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 10:30:37.967 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 10:30:37.968 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 10:30:37.972 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 10:30:37.973 [main] 
DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 10:30:38.023 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 10:30:38.024 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 10:30:38.025 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 10:30:38.025 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 10:30:38.028 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 10:30:38.028 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 10:30:38.029 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 10:30:38.031 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:30:38.031 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 10:30:38.031 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 10:30:38.032 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-03 10:30:38.032 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 10:30:38.032 [main] DEBUG 
org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-03 10:30:38.033 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 10:30:38.033 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 10:30:38.033 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 10:30:38.034 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 10:30:38.034 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:30:38.034 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 10:30:38.044 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 10:30:38.067 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 10:30:38.067 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 10:30:38.074 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 10:30:38.074 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 10:30:38.082 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 10:30:38.103 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 
10:30:38.158 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 10:30:38.158 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 10:30:38.166 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 10:30:38.168 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 10:30:38.169 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 10:30:38.170 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 10:30:38.170 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 10:30:38.173 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 10:30:38.174 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 10:30:38.175 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 10:30:38.175 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 10:30:38.175 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 10:30:38.175 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 10:30:38.175 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 10:30:38.241 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:30:38.268 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 99384 (auto-detected) -2022-02-03 10:30:38.271 [main] DEBUG io.netty.util.NetUtil - 
-Djava.net.preferIPv4Stack: false -2022-02-03 10:30:38.271 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 10:30:38.275 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 10:30:38.277 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 10:30:38.280 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 10:30:38.310 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 10:30:38.310 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 10:30:38.310 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 10:30:38.341 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 10:30:38.344 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 10:30:38.348 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:30:38.350 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:30:38.352 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 10:30:38.354 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 10:30:54.128 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 10:30:54.138 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 10:30:54.138 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 10:30:54.140 [main] DEBUG 
io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 10:30:54.140 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 10:30:54.141 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 10:30:54.141 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 10:30:54.142 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 10:30:54.143 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @31920ade -2022-02-03 10:30:54.144 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 10:30:54.144 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 10:30:54.145 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 10:30:54.146 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 10:30:54.146 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 10:30:54.147 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 10:30:54.148 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 10:30:54.148 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 10:30:54.149 [main] 
DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 10:30:54.149 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 10:30:54.187 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 10:30:54.191 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 10:30:54.191 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 10:30:54.191 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 10:30:54.194 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 10:30:54.194 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 10:30:54.194 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 10:30:54.196 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:30:54.197 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 10:30:54.197 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 10:30:54.198 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-03 10:30:54.198 [main] DEBUG org.opensearch.threadpool.ThreadPool - created 
thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 10:30:54.198 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-03 10:30:54.199 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 10:30:54.199 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 10:30:54.199 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 10:30:54.200 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 10:30:54.200 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 10:30:54.200 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 10:30:54.213 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 10:30:54.228 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 10:30:54.228 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 10:30:54.234 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 10:30:54.234 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 10:30:54.240 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 10:30:54.251 [main] DEBUG IndependentPlugin - using profile[default], 
worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:30:54.272 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 10:30:54.272 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 10:30:54.274 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 10:30:54.275 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 10:30:54.275 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 10:30:54.275 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 10:30:54.303 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:30:54.325 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 
99431 (auto-detected) -2022-02-03 10:30:54.327 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 10:30:54.328 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 10:30:54.331 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 10:30:54.331 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 10:30:54.334 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 10:30:54.359 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 10:30:54.359 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 10:30:54.359 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 10:30:54.380 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 10:30:54.381 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 10:30:54.384 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 10:30:54.385 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 10:30:54.387 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 10:30:54.388 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 12:54:07.646 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 12:54:07.653 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 12:54:07.653 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java 
version: 14 -2022-02-03 12:54:07.655 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 12:54:07.656 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 12:54:07.656 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 12:54:07.657 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 12:54:07.658 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 12:54:07.659 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @31920ade -2022-02-03 12:54:07.660 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 12:54:07.661 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 12:54:07.661 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 12:54:07.662 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 12:54:07.662 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 12:54:07.663 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 12:54:07.664 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 12:54:07.664 [main] DEBUG io.netty.util.internal.PlatformDependent - 
-Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 12:54:07.665 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 12:54:07.666 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 12:54:07.701 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 12:54:07.702 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 12:54:07.703 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 12:54:07.703 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 12:54:07.707 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 12:54:07.708 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 12:54:07.708 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 12:54:07.710 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 12:54:07.710 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 12:54:07.710 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 12:54:07.711 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] 
-2022-02-03 12:54:07.711 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 12:54:07.711 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-03 12:54:07.711 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 12:54:07.712 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 12:54:07.712 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 12:54:07.712 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 12:54:07.712 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 12:54:07.713 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 12:54:07.727 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 12:54:07.747 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 12:54:07.748 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 12:54:07.754 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 12:54:07.755 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 12:54:07.761 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available 
-2022-02-03 12:54:07.772 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 12:54:07.799 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 12:54:07.799 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 12:54:07.802 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 12:54:07.803 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 12:54:07.803 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 12:54:07.803 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 12:54:07.803 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 12:54:07.803 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 12:54:07.841 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 
12:54:07.862 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 11873 (auto-detected) -2022-02-03 12:54:07.864 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 12:54:07.864 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 12:54:07.867 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 12:54:07.868 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 12:54:07.870 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 12:54:07.895 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 12:54:07.895 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 12:54:07.895 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 12:54:07.915 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 12:54:07.916 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 12:54:07.918 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 12:54:07.920 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 12:54:07.921 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 12:54:07.922 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 15:22:56.351 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 15:22:56.361 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false 
-2022-02-03 15:22:56.361 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 15:22:56.363 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 15:22:56.364 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 15:22:56.365 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 15:22:56.366 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 15:22:56.367 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 15:22:56.368 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @b83a9be -2022-02-03 15:22:56.370 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 15:22:56.370 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 15:22:56.371 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 15:22:56.371 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 15:22:56.371 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 15:22:56.372 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 15:22:56.373 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 15:22:56.373 [main] 
DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 15:22:56.374 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 15:22:56.374 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 15:22:56.420 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 15:22:56.422 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 15:22:56.422 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 15:22:56.422 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 15:22:56.425 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 15:22:56.426 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 15:22:56.426 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 15:22:56.428 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 15:22:56.428 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 15:22:56.429 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 15:22:56.429 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name 
[fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-03 15:22:56.429 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 15:22:56.430 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-03 15:22:56.430 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 15:22:56.430 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 15:22:56.430 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 15:22:56.431 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 15:22:56.431 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 15:22:56.431 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 15:22:56.440 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 15:22:56.463 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 15:22:56.463 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 15:22:56.470 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 15:22:56.470 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 15:22:56.477 [main] DEBUG io.netty.util.internal.PlatformDependent 
- org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 15:22:56.486 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 15:22:56.512 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 15:22:56.513 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 15:22:56.516 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 15:22:56.516 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 15:22:56.516 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 15:22:56.516 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 15:22:56.518 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 15:22:56.518 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 15:22:56.518 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 15:22:56.518 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 15:22:56.519 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 15:22:56.519 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 15:22:56.519 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 15:22:56.519 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-03 15:22:56.554 [main] DEBUG IndependentPlugin - binding 
server bootstrap to: [::1, 127.0.0.1] -2022-02-03 15:22:56.575 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 23813 (auto-detected) -2022-02-03 15:22:56.577 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 15:22:56.577 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 15:22:56.580 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 15:22:56.583 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 15:22:56.585 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 15:22:56.614 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 15:22:56.614 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 15:22:56.614 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 15:22:56.636 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 15:22:56.638 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 15:22:56.640 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 15:22:56.641 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 15:22:56.642 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 15:22:56.643 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-03 15:24:23.490 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-03 15:24:23.497 [main] DEBUG 
io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-03 15:24:23.497 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-03 15:24:23.499 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-03 15:24:23.499 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-03 15:24:23.500 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-03 15:24:23.501 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-03 15:24:23.501 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-03 15:24:23.502 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @b83a9be -2022-02-03 15:24:23.503 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-03 15:24:23.504 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-03 15:24:23.504 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-03 15:24:23.505 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-03 15:24:23.505 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-03 15:24:23.506 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-03 15:24:23.506 [main] DEBUG io.netty.util.internal.PlatformDependent 
- -Dio.netty.maxDirectMemory: -1 bytes -2022-02-03 15:24:23.506 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-03 15:24:23.507 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-03 15:24:23.507 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-03 15:24:23.558 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-03 15:24:23.559 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-03 15:24:23.559 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-03 15:24:23.560 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-03 15:24:23.563 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-03 15:24:23.563 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-03 15:24:23.564 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-03 15:24:23.566 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-03 15:24:23.566 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-03 15:24:23.566 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-03 15:24:23.567 [main] DEBUG 
org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-03 15:24:23.567 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-03 15:24:23.567 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-03 15:24:23.567 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-03 15:24:23.568 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-03 15:24:23.568 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-03 15:24:23.568 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-03 15:24:23.568 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-03 15:24:23.569 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-03 15:24:23.577 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-03 15:24:23.601 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-03 15:24:23.601 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-03 15:24:23.610 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-03 15:24:23.610 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-03 
15:24:23.618 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-03 15:24:23.634 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 15:24:23.664 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-03 15:24:23.664 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-03 15:24:23.666 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-03 15:24:23.666 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-03 15:24:23.666 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-03 15:24:23.666 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-03 15:24:23.666 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-03 15:24:23.667 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 
-2022-02-03 15:24:23.703 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 15:24:23.721 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 24122 (auto-detected) -2022-02-03 15:24:23.722 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-03 15:24:23.722 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-03 15:24:23.724 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-03 15:24:23.725 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-03 15:24:23.727 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-03 15:24:23.744 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-03 15:24:23.744 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-03 15:24:23.744 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-03 15:24:23.763 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-03 15:24:23.766 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-03 15:24:23.773 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-03 15:24:23.775 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-03 15:24:23.779 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-03 15:24:23.780 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} diff --git a/application-20220204.log b/application-20220204.log deleted file mode 100644 index 1058c5f..0000000 --- 
a/application-20220204.log +++ /dev/null @@ -1,150 +0,0 @@ -2022-02-04 09:55:37.293 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-04 09:55:37.301 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-04 09:55:37.301 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-04 09:55:37.303 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-04 09:55:37.304 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-04 09:55:37.305 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-04 09:55:37.305 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-04 09:55:37.306 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-04 09:55:37.307 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @b83a9be -2022-02-04 09:55:37.308 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-04 09:55:37.309 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-04 09:55:37.310 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-04 09:55:37.310 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: /var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-04 09:55:37.310 [main] DEBUG 
io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-04 09:55:37.311 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-04 09:55:37.312 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-04 09:55:37.312 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-04 09:55:37.315 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-04 09:55:37.315 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-04 09:55:37.351 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-04 09:55:37.352 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-04 09:55:37.352 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-04 09:55:37.352 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-04 09:55:37.355 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-04 09:55:37.355 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-04 09:55:37.356 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-04 09:55:37.357 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] when determining automatic queue size -2022-02-04 09:55:37.358 [main] DEBUG 
org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-04 09:55:37.358 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-04 09:55:37.358 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-04 09:55:37.358 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-04 09:55:37.359 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-04 09:55:37.359 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-04 09:55:37.359 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-04 09:55:37.359 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-04 09:55:37.360 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-04 09:55:37.360 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-04 09:55:37.360 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-04 09:55:37.368 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-04 09:55:37.388 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-04 09:55:37.388 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - 
-Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-04 09:55:37.395 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-04 09:55:37.395 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-04 09:55:37.401 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-04 09:55:37.411 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-04 09:55:37.437 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-04 09:55:37.437 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-04 09:55:37.440 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-04 09:55:37.441 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - 
-Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-04 09:55:37.441 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-04 09:55:37.441 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-04 09:55:37.478 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-04 09:55:37.496 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 42140 (auto-detected) -2022-02-04 09:55:37.498 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-04 09:55:37.498 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-04 09:55:37.501 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-04 09:55:37.501 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. Default: 128 -2022-02-04 09:55:37.503 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-04 09:55:37.524 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-04 09:55:37.524 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-04 09:55:37.524 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-04 09:55:37.544 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-04 09:55:37.545 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-04 09:55:37.547 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-04 09:55:37.548 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-04 09:55:37.549 [main] DEBUG 
IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-04 09:55:37.549 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} -2022-02-04 12:00:27.434 [main] DEBUG io.netty.util.internal.logging.InternalLoggerFactory - Using Log4J2 as the default logging framework -2022-02-04 12:00:27.441 [main] DEBUG io.netty.util.internal.PlatformDependent0 - -Dio.netty.noUnsafe: false -2022-02-04 12:00:27.441 [main] DEBUG io.netty.util.internal.PlatformDependent0 - Java version: 14 -2022-02-04 12:00:27.443 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.theUnsafe: available -2022-02-04 12:00:27.444 [main] DEBUG io.netty.util.internal.PlatformDependent0 - sun.misc.Unsafe.copyMemory: available -2022-02-04 12:00:27.445 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Buffer.address: available -2022-02-04 12:00:27.445 [main] DEBUG io.netty.util.internal.PlatformDependent0 - direct buffer constructor: unavailable: Reflective setAccessible(true) disabled -2022-02-04 12:00:27.446 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.Bits.unaligned: available, true -2022-02-04 12:00:27.447 [main] DEBUG io.netty.util.internal.PlatformDependent0 - jdk.internal.misc.Unsafe.allocateUninitializedArray(int): unavailable: class io.netty.util.internal.PlatformDependent0$6 cannot access class jdk.internal.misc.Unsafe (in module java.base) because module java.base does not export jdk.internal.misc to unnamed module @b83a9be -2022-02-04 12:00:27.449 [main] DEBUG io.netty.util.internal.PlatformDependent0 - java.nio.DirectByteBuffer.(long, int): unavailable -2022-02-04 12:00:27.449 [main] DEBUG io.netty.util.internal.PlatformDependent - sun.misc.Unsafe: available -2022-02-04 12:00:27.450 [main] DEBUG io.netty.util.internal.PlatformDependent - maxDirectMemory: 4294967296 bytes (maybe) -2022-02-04 12:00:27.450 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.tmpdir: 
/var/folders/5j/c7kjyy2j33nc2j6gjbtmskgr0000gs/T (java.io.tmpdir) -2022-02-04 12:00:27.450 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.bitMode: 64 (sun.arch.data.model) -2022-02-04 12:00:27.451 [main] DEBUG io.netty.util.internal.PlatformDependent - Platform: MacOS -2022-02-04 12:00:27.452 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.maxDirectMemory: -1 bytes -2022-02-04 12:00:27.452 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.uninitializedArrayAllocationThreshold: -1 -2022-02-04 12:00:27.453 [main] DEBUG io.netty.util.internal.CleanerJava9 - java.nio.ByteBuffer.cleaner(): available -2022-02-04 12:00:27.453 [main] DEBUG io.netty.util.internal.PlatformDependent - -Dio.netty.noPreferDirect: false -2022-02-04 12:00:27.498 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [force_merge], size [1], queue size [unbounded] -2022-02-04 12:00:27.500 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_started], core [1], max [24], keep alive [5m] -2022-02-04 12:00:27.500 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [listener], size [6], queue size [unbounded] -2022-02-04 12:00:27.501 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [refresh], core [1], max [6], keep alive [5m] -2022-02-04 12:00:27.503 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_write], size [5], queue size [1k] -2022-02-04 12:00:27.504 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [generic], core [4], max [128], keep alive [30s] -2022-02-04 12:00:27.504 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [warmer], core [1], max [5], keep alive [5m] -2022-02-04 12:00:27.506 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search] will adjust queue by [50] 
when determining automatic queue size -2022-02-04 12:00:27.506 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search], size [19], queue size [1k] -2022-02-04 12:00:27.507 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [flush], core [1], max [5], keep alive [5m] -2022-02-04 12:00:27.507 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [fetch_shard_store], core [1], max [24], keep alive [5m] -2022-02-04 12:00:27.507 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [management], core [1], max [5], keep alive [5m] -2022-02-04 12:00:27.507 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [get], size [12], queue size [1k] -2022-02-04 12:00:27.508 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [analyze], size [1], queue size [16] -2022-02-04 12:00:27.508 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [system_read], size [5], queue size [2k] -2022-02-04 12:00:27.508 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [write], size [12], queue size [10k] -2022-02-04 12:00:27.508 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [snapshot], core [1], max [5], keep alive [5m] -2022-02-04 12:00:27.509 [main] DEBUG org.opensearch.common.util.concurrent.QueueResizingOpenSearchThreadPoolExecutor - thread pool [test/search_throttled] will adjust queue by [50] when determining automatic queue size -2022-02-04 12:00:27.509 [main] DEBUG org.opensearch.threadpool.ThreadPool - created thread pool: name [search_throttled], size [1], queue size [100] -2022-02-04 12:00:27.520 [main] DEBUG io.netty.channel.MultithreadEventLoopGroup - -Dio.netty.eventLoopThreads: 24 -2022-02-04 12:00:27.542 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.initialSize: 1024 -2022-02-04 
12:00:27.542 [main] DEBUG io.netty.util.internal.InternalThreadLocalMap - -Dio.netty.threadLocalMap.stringBuilder.maxSize: 4096 -2022-02-04 12:00:27.551 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.noKeySetOptimization: false -2022-02-04 12:00:27.551 [main] DEBUG io.netty.channel.nio.NioEventLoop - -Dio.netty.selectorAutoRebuildThreshold: 512 -2022-02-04 12:00:27.558 [main] DEBUG io.netty.util.internal.PlatformDependent - org.jctools-core.MpscChunkedArrayQueue: available -2022-02-04 12:00:27.569 [main] DEBUG IndependentPlugin - using profile[default], worker_count[12], port[3333], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-04 12:00:27.596 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.level: simple -2022-02-04 12:00:27.596 [main] DEBUG io.netty.util.ResourceLeakDetector - -Dio.netty.leakDetection.targetRecords: 4 -2022-02-04 12:00:27.598 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numHeapArenas: 24 -2022-02-04 12:00:27.598 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.numDirectArenas: 24 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.pageSize: 8192 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxOrder: 11 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.chunkSize: 16777216 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.smallCacheSize: 256 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.normalCacheSize: 64 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedBufferCapacity: 32768 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimInterval: 8192 -2022-02-04 
12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.cacheTrimIntervalMillis: 0 -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.useCacheForAllThreads: true -2022-02-04 12:00:27.599 [main] DEBUG io.netty.buffer.PooledByteBufAllocator - -Dio.netty.allocator.maxCachedByteBuffersPerChunk: 1023 -2022-02-04 12:00:27.635 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-04 12:00:27.654 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.processId: 55188 (auto-detected) -2022-02-04 12:00:27.656 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv4Stack: false -2022-02-04 12:00:27.656 [main] DEBUG io.netty.util.NetUtil - -Djava.net.preferIPv6Addresses: false -2022-02-04 12:00:27.660 [main] DEBUG io.netty.util.NetUtilInitializations - Loopback interface: lo0 (lo0, 0:0:0:0:0:0:0:1%lo0) -2022-02-04 12:00:27.661 [main] DEBUG io.netty.util.NetUtil - Failed to get SOMAXCONN from sysctl and file /proc/sys/net/core/somaxconn. 
Default: 128 -2022-02-04 12:00:27.664 [main] DEBUG io.netty.channel.DefaultChannelId - -Dio.netty.machineId: 88:66:5a:ff:fe:37:c9:4e (auto-detected) -2022-02-04 12:00:27.687 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.allocator.type: pooled -2022-02-04 12:00:27.687 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.threadLocalDirectBufferSize: 0 -2022-02-04 12:00:27.687 [main] DEBUG io.netty.buffer.ByteBufUtil - -Dio.netty.maxThreadLocalCharBufferSize: 16384 -2022-02-04 12:00:27.708 [main] DEBUG IndependentPlugin - Bound profile [default] to address {[::1]:3333} -2022-02-04 12:00:27.710 [main] DEBUG IndependentPlugin - Bound profile [default] to address {127.0.0.1:3333} -2022-02-04 12:00:27.712 [main] DEBUG IndependentPlugin - using profile[test], worker_count[12], port[5555], bind_host[[]], publish_host[[]], receive_predictor[64kb->64kb] -2022-02-04 12:00:27.713 [main] DEBUG IndependentPlugin - binding server bootstrap to: [::1, 127.0.0.1] -2022-02-04 12:00:27.714 [main] DEBUG IndependentPlugin - Bound profile [test] to address {[::1]:5555} -2022-02-04 12:00:27.715 [main] DEBUG IndependentPlugin - Bound profile [test] to address {127.0.0.1:5555} diff --git a/build.gradle b/build.gradle index c4edb2f..9d9e139 100644 --- a/build.gradle +++ b/build.gradle @@ -1,24 +1,7 @@ -//buildscript { -// ext { -// opensearch_version = System.getProperty("opensearch.version", "2.0.0-SNAPSHOT") -// } -// -// repositories { -// //mavenLocal() -// maven { url "https://aws.oss.sonatype.org/content/repositories/snapshots" } -// mavenCentral() -// } -// -//// dependencies { -//// classpath "org.opensearch.gradle:build-tools:${opensearch_version}" -//// } -//} - plugins { id 'java' } -//apply plugin: 'opensearch.opensearchplugin' ext { projectSubstitutions = [:] @@ -26,14 +9,10 @@ ext { noticeFile = rootProject.file('NOTICE.txt') } -//opensearchplugin { -// description 'The S3 repository plugin adds S3 repositories' -// classname 'IndependentPlugin' -//} apply plugin: 
'application' -mainClassName = 'IndependentPlugin' +mainClassName = 'transportservice.RunPlugin' group 'org.example' version '1.0-SNAPSHOT' @@ -47,22 +26,33 @@ dependencies { implementation "org.opensearch:opensearch:2.0.0-SNAPSHOT" implementation group: 'org.apache.logging.log4j', name: 'log4j-api', version: '2.17.1' implementation group: 'org.apache.logging.log4j', name: 'log4j-core', version: '2.17.1' -// implementation group: 'io.netty', name: 'netty-all', version: '4.1.5.Final' -// implementation group: 'io.netty', name: 'netty-transport-rxtx', version: '4.1.5.Final' implementation "io.netty:netty-all:4.1.73.Final" + testCompileOnly ("junit:junit:4.13.2") { + exclude module : 'hamcrest' + exclude module : 'hamcrest-core' + } + // Keeping it for now. Will remove it later once figure out the right dependencies // implementation "io.netty:netty-buffer:4.1.73.Final" // implementation "io.netty:netty-codec:4.1.73.Final" // implementation "io.netty:netty-codec-http:4.1.73.Final" // implementation "io.netty:netty-common:4.1.73.Final" // implementation "io.netty:netty-handler:4.1.73.Final" // implementation "io.netty:netty-resolver:4.1.73.Final" -// implementation "io.netty:netty-transport:4.1.73.Final" +// implementation "io.netty:netty-transportservice.transport:4.1.73.Final" //implementation "org.apache.logging.log4j:log4j-1.2-api:2.17.1" - testImplementation 'org.junit.jupiter:junit-jupiter-api:5.8.1' - testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.8.1' + implementation 'javax.xml.bind:jaxb-api:2.2.2' + testImplementation 'org.junit.jupiter:junit-jupiter-api:5.8.2' + testRuntimeOnly 'org.junit.jupiter:junit-jupiter-engine:5.8.2' + testImplementation "org.opensearch.test:framework:2.0.0-SNAPSHOT" } test { useJUnitPlatform() -} \ No newline at end of file + jvmArgs '--enable-preview' + systemProperty 'tests.security.manager', 'false' + testLogging.showStandardStreams = true + testLogging { + events "passed", "skipped", "failed", "standardOut", 
"standardError" + } +} diff --git a/src/main/java/IndependentPlugin.java b/src/main/java/IndependentPlugin.java deleted file mode 100644 index 1eebc61..0000000 --- a/src/main/java/IndependentPlugin.java +++ /dev/null @@ -1,604 +0,0 @@ -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; -import io.netty.channel.*; -import io.netty.channel.socket.nio.NioChannelOption; -import io.netty.util.AttributeKey; -import netty4.Netty4TcpChannel; -import netty4.Netty4TcpServerChannel; -import org.opensearch.Version; -import org.opensearch.common.bytes.BytesReference; -import org.opensearch.common.bytes.ReleasableBytesReference; -import org.opensearch.common.util.concurrent.ThreadContext; -import transport.TcpChannel; -import transport.OutboundHandler; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.logging.log4j.message.ParameterizedMessage; -import org.opensearch.ExceptionsHelper; -import org.opensearch.action.ActionListener; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.common.Strings; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.network.NetworkService; -import org.opensearch.common.settings.Setting; -import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeUnit; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.concurrent.ConcurrentCollections; -import org.opensearch.common.util.concurrent.OpenSearchExecutors; -import org.opensearch.core.internal.net.NetUtils; -import org.opensearch.env.Environment; -import org.opensearch.plugins.DiscoveryPlugin; -import 
org.opensearch.plugins.Plugin; -import org.opensearch.plugins.PluginInfo; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.*; -import io.netty.bootstrap.ServerBootstrap; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.SocketOption; -import java.net.UnknownHostException; -import java.util.*; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; - -import static org.opensearch.common.settings.Setting.byteSizeSetting; -import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; - -public abstract class IndependentPlugin extends AbstractLifecycleComponent implements Transport { - protected static Set profileSettings = getProfileSettings(Settings.builder().put("transport.profiles.test.port", "5555").put("transport.profiles.default.port", "3333").build()); - private static final Logger logger = LogManager.getLogger(IndependentPlugin.class); - private static Environment env; - private static final ReadWriteLock closeLock = new ReentrantReadWriteLock(); - private static final Map> serverChannels = newConcurrentMap(); - - private static volatile BoundTransportAddress boundAddress; - private static final ConcurrentMap profileBoundAddresses = newConcurrentMap(); - private final Map serverBootstraps = newConcurrentMap(); - static final AttributeKey SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); - private static SharedGroupFactory sharedGroupFactory; - private static volatile SharedGroupFactory.SharedGroup sharedGroup; - private final ByteSizeValue receivePredictorMin; - private final ByteSizeValue receivePredictorMax; - private final RecvByteBufAllocator recvByteBufAllocator; - static final 
AttributeKey CHANNEL_KEY = AttributeKey.newInstance("es-channel"); - private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); - protected final ThreadPool threadPool; - private OutboundHandler handler; - private TcpChannel channel; - private DiscoveryNode node; - private final TransportRequestOptions options = TransportRequestOptions.EMPTY; - private final AtomicReference> message = new AtomicReference<>(); - private static final Settings settings = Settings.builder() - .put("node.name", "NettySizeHeaderFrameDecoderTests") - .put(TransportSettings.BIND_HOST.getKey(), "127.0.0.1") - .put(TransportSettings.PORT.getKey(), "0") - .build(); - - protected void serverAcceptedChannel(TcpChannel channel) { - boolean addedOnThisCall = acceptedChannels.add(channel); - assert addedOnThisCall : "Channel should only be added to accepted channel set once"; - // Mark the channel init time - channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); - channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel))); - logger.trace(() -> new ParameterizedMessage("Tcp transport channel accepted: {}", channel)); - } - - // Another class - private void addClosedExceptionLogger(Channel channel) { - channel.closeFuture().addListener(f -> { - if (f.isSuccess() == false) { - logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause()); - } - }); - } - - protected class ServerChannelInitializer extends ChannelInitializer { - - protected final String name; - private final NettyByteBufSizer sizer = new NettyByteBufSizer(); - - protected ServerChannelInitializer(String name) { - this.name = name; - } - - @Override - protected void initChannel(Channel ch) throws Exception { - addClosedExceptionLogger(ch); - assert ch instanceof Netty4NioSocketChannel; - NetUtils.tryEnsureReasonableKeepAliveConfig(((Netty4NioSocketChannel) ch).javaChannel()); - Netty4TcpChannel nettyTcpChannel = new 
Netty4TcpChannel(ch, true, name, ch.newSucceededFuture()); - ch.attr(CHANNEL_KEY).set(nettyTcpChannel); - ch.pipeline().addLast("byte_buf_sizer", sizer); - ch.pipeline().addLast("logging", new OpenSearchLoggingHandler()); - //ch.pipeline().addLast("dispatcher", new Netty4MessageChannelHandler(pageCacheRecycler, Netty4Transport.this)); - serverAcceptedChannel((TcpChannel) nettyTcpChannel); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - ExceptionsHelper.maybeDieOnAnotherThread(cause); - super.exceptionCaught(ctx, cause); - } - } - - - // - public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( - "transport.netty.receive_predictor_size", - new ByteSizeValue(64, ByteSizeUnit.KB), - Setting.Property.NodeScope - ); - - public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting( - "transport.netty.receive_predictor_min", - NETTY_RECEIVE_PREDICTOR_SIZE, - Setting.Property.NodeScope - ); - - public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting( - "transport.netty.receive_predictor_max", - NETTY_RECEIVE_PREDICTOR_SIZE, - Setting.Property.NodeScope - ); - - public static final Setting WORKER_COUNT = new Setting<>( - "transport.netty.worker_count", - (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), - (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), - Setting.Property.NodeScope - ); - - protected IndependentPlugin(SharedGroupFactory sharedGroupFactory) { - this.sharedGroupFactory = sharedGroupFactory; - this.threadPool = new TestThreadPool("test");; - this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); - this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings); - if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) { - recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes()); - } else { - recvByteBufAllocator = new 
AdaptiveRecvByteBufAllocator( - (int) receivePredictorMin.getBytes(), - (int) receivePredictorMin.getBytes(), - (int) receivePredictorMax.getBytes() - ); - } - } - - - // Another clas - - - public static Set getProfileSettings(Settings settings) { - HashSet profiles = new HashSet<>(); - boolean isDefaultSet = false; - for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { - profiles.add(new NewPlugin.ProfileSettings(settings, profile)); - if (TransportSettings.DEFAULT_PROFILE.equals(profile)) { - isDefaultSet = true; - } - } - if (isDefaultSet == false) { - profiles.add(new NewPlugin.ProfileSettings(settings, TransportSettings.DEFAULT_PROFILE)); - } - return Collections.unmodifiableSet(profiles); - } - - public static Settings settings() { - return env.settings(); - } - - private static List getCustomNameResolvers(List discoveryPlugins) { - List customNameResolvers = new ArrayList<>(); - for (DiscoveryPlugin discoveryPlugin : discoveryPlugins) { - NetworkService.CustomNameResolver customNameResolver = discoveryPlugin.getCustomNameResolver(settings()); - if (customNameResolver != null) { - customNameResolvers.add(customNameResolver); - } - } - return customNameResolvers; - } - - protected ChannelHandler getServerChannelInitializer(String name) { - return new ServerChannelInitializer(name); - } - - public static List filterPlugins(Class type) { - final List> plugins = new ArrayList<>(); - return plugins.stream().filter(x -> type.isAssignableFrom(x.v2().getClass())).map(p -> ((T) p.v2())).collect(Collectors.toList()); - } - - final static NetworkService networkService = new NetworkService( - getCustomNameResolvers(filterPlugins(DiscoveryPlugin.class)) - ); - - protected TcpServerChannel bind(String name, InetSocketAddress address) { - Channel channel = serverBootstraps.get(name).bind(address).syncUninterruptibly().channel(); - Netty4TcpServerChannel esChannel = new Netty4TcpServerChannel(channel); - 
channel.attr(SERVER_CHANNEL_KEY).set(esChannel); - return esChannel; - } - - private InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { - PortsRange portsRange = new PortsRange(port); - final AtomicReference lastException = new AtomicReference<>(); - final AtomicReference boundSocket = new AtomicReference<>(); - closeLock.writeLock().lock(); - try { - // No need for locking here since Lifecycle objects can't move from STARTED to INITIALIZED - if (lifecycle.initialized() == false && lifecycle.started() == false) { - throw new IllegalStateException("transport has been stopped"); - } - boolean success = portsRange.iterate(portNumber -> { - try { - TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); - serverChannels.computeIfAbsent(name, k -> new ArrayList<>()).add(channel); - boundSocket.set(channel.getLocalAddress()); - } catch (Exception e) { - lastException.set(e); - return false; - } - return true; - }); - if (!success) { - throw new BindTransportException( - "Failed to bind to " + NetworkAddress.format(hostAddress, portsRange), - lastException.get() - ); - } - } finally { - closeLock.writeLock().unlock(); - } - if (logger.isDebugEnabled()) { - logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); - } - - return boundSocket.get(); - } - - public static int resolvePublishPort(NewPlugin.ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no 
matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for (InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; - throw new BindTransportException( - "Failed to auto-resolve publish port" - + profileExplanation - + ", multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). " - + "Please specify a unique port by setting " - + TransportSettings.PORT.getKey() - + " or " - + TransportSettings.PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - - - private static BoundTransportAddress createBoundTransportAddress(NewPlugin.ProfileSettings profileSettings, List boundAddresses) { - String[] boundAddressesHostStrings = new String[boundAddresses.size()]; - TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; - for (int i = 0; i < boundAddresses.size(); i++) { - InetSocketAddress boundAddress = boundAddresses.get(i); - boundAddressesHostStrings[i] = boundAddress.getHostString(); - transportBoundAddresses[i] = new TransportAddress(boundAddress); - } - - List publishHosts = profileSettings.publishHosts; - if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { - publishHosts = Arrays.asList(boundAddressesHostStrings); - } - if (publishHosts.isEmpty()) { - publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); - } catch (Exception e) { - throw new BindTransportException("Failed to 
resolve publish address", e); - } - - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - - - protected final void bindServer(NewPlugin.ProfileSettings profileSettings) { - // Bind and start to accept incoming connections. - System.out.println("PROFILE"); - //logger.info("PROFILE", profileSettings); - InetAddress[] hostAddresses; - List profileBindHosts = profileSettings.bindHosts; - try { - hostAddresses = networkService.resolveBindHostAddresses(profileBindHosts.toArray(Strings.EMPTY_ARRAY)); - } catch (IOException e) { - throw new BindTransportException("Failed to resolve host " + profileBindHosts, e); - } - if (logger.isDebugEnabled()) { - String[] addresses = new String[hostAddresses.length]; - for (int i = 0; i < hostAddresses.length; i++) { - addresses[i] = NetworkAddress.format(hostAddresses[i]); - } - logger.debug("binding server bootstrap to: {}", (Object) addresses); - } - - assert hostAddresses.length > 0; - - List boundAddresses = new ArrayList<>(); - for (InetAddress hostAddress : hostAddresses) { - boundAddresses.add(bindToPort(profileSettings.profileName, hostAddress, profileSettings.portOrRange)); - } - - final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); - - if (profileSettings.isDefaultProfile) { - boundAddress = boundTransportAddress; - } else { - profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); - } - } - - private void createServerBootstrap(NewPlugin.ProfileSettings profileSettings, SharedGroupFactory.SharedGroup sharedGroup) { - String name = profileSettings.profileName; - if (logger.isDebugEnabled()) { - logger.debug( - "using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], 
receive_predictor[{}->{}]", - name, - sharedGroupFactory.getTransportWorkerCount(), - profileSettings.portOrRange, - profileSettings.bindHosts, - profileSettings.publishHosts, - receivePredictorMin, - receivePredictorMax - ); - } - - final ServerBootstrap serverBootstrap = new ServerBootstrap(); - - serverBootstrap.group(sharedGroup.getLowLevelGroup()); - - // NettyAllocator will return the channel type designed to work with the configuredAllocator - serverBootstrap.channel(NettyAllocator.getServerChannelType()); - - // Set the allocators for both the server channel and the child channels created - serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); - serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); - - serverBootstrap.childHandler(getServerChannelInitializer(name)); - // serverBootstrap.handler(new ServerChannelExceptionHandler()); - - serverBootstrap.childOption(ChannelOption.TCP_NODELAY, profileSettings.tcpNoDelay); - serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, profileSettings.tcpKeepAlive); - if (profileSettings.tcpKeepAlive) { - // Note that Netty logs a warning if it can't set the option - if (profileSettings.tcpKeepIdle >= 0) { - final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); - if (keepIdleOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), profileSettings.tcpKeepIdle); - } - } - if (profileSettings.tcpKeepInterval >= 0) { - final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); - if (keepIntervalOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepIntervalOption), profileSettings.tcpKeepInterval); - } - - } - if (profileSettings.tcpKeepCount >= 0) { - final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); - if (keepCountOption != null) { - serverBootstrap.childOption(NioChannelOption.of(keepCountOption), profileSettings.tcpKeepCount); - 
} - } - } - - if (profileSettings.sendBufferSize.getBytes() != -1) { - serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(profileSettings.sendBufferSize.getBytes())); - } - - if (profileSettings.receiveBufferSize.getBytes() != -1) { - serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(profileSettings.receiveBufferSize.bytesAsInt())); - } - - serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); - - serverBootstrap.option(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); - serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); - serverBootstrap.validate(); - - serverBootstraps.put(name, serverBootstrap); - } - - // Test Send Request - - - - public AtomicReference> getListenerCaptor() { - return new AtomicReference(); - } - - - public void testSendRequest() throws IOException { - ThreadContext threadContext = threadPool.getThreadContext(); - Version version = Version.CURRENT; - String action = "handshake"; - long requestId = 200; - boolean isHandshake = true; - boolean compress = true; - String value = "message"; - threadContext.putHeader("header", "header_value"); - TestRequest request = new TestRequest(value); - - AtomicReference nodeRef = new AtomicReference<>(); - AtomicLong requestIdRef = new AtomicLong(); - AtomicReference actionRef = new AtomicReference<>(); - AtomicReference requestRef = new AtomicReference<>(); - handler.setMessageListener(new TransportMessageListener() { - @Override - public void onRequestSent( - DiscoveryNode node, - long requestId, - String action, - TransportRequest request, - TransportRequestOptions options - ) { - nodeRef.set(node); - requestIdRef.set(requestId); - actionRef.set(action); - requestRef.set(request); - } - }); - handler.sendRequest(node, channel, requestId, action, request, options, version, compress, isHandshake); - - //BytesReference 
reference = channel.getMessageCaptor().get(); - ActionListener sendListener = getListenerCaptor().get(); - boolean flag = true; - if (flag) { - sendListener.onResponse(null); - } else { - sendListener.onFailure(new IOException("failed")); - } -// assertEquals(node, nodeRef.get()); -// assertEquals(requestId, requestIdRef.get()); -// assertEquals(action, actionRef.get()); -// assertEquals(request, requestRef.get()); - - //pipeline.handleBytes(channel, new ReleasableBytesReference(reference, () -> {})); - final Tuple tuple = message.get(); - final Header header = tuple.v1(); - final TestRequest message = new TestRequest(tuple.v2().streamInput()); - logger.debug("VERSION", version); - logger.debug("HEADER", header); - logger.debug("MESSAGE", message); - // assertEquals(version, header); -// assertEquals(requestId, header.getRequestId()); -// assertTrue(header.isRequest()); -// assertFalse(header.isResponse()); -// if (isHandshake) { -// assertTrue(header.isHandshake()); -// } else { -// assertFalse(header.isHandshake()); -// } -// if (compress) { -// assertTrue(header.isCompressed()); -// } else { -// assertFalse(header.isCompressed()); -// } - -// assertEquals(value, message.value); -// assertEquals("header_value", header.getHeaders().v1().get("header")); - } - - - - public static void main(String[] args) throws IOException { - IndependentPlugin newPlugin = new IndependentPlugin(new SharedGroupFactory(settings)) { - @Override - protected void doStart() { - - } - - @Override - protected void doStop() { - - } - - @Override - protected void doClose() throws IOException { - - } - - @Override - public void setMessageListener(TransportMessageListener transportMessageListener) { - - } - - @Override - public BoundTransportAddress boundAddress() { - return null; - } - - @Override - public Map profileBoundAddresses() { - return null; - } - - @Override - public TransportAddress[] addressesFromString(String s) throws UnknownHostException { - return new TransportAddress[0]; - } - 
- @Override - public List getDefaultSeedAddresses() { - return null; - } - - @Override - public void openConnection(DiscoveryNode discoveryNode, ConnectionProfile connectionProfile, ActionListener actionListener) { - - } - - @Override - public TransportStats getStats() { - return null; - } - - @Override - public ResponseHandlers getResponseHandlers() { - return null; - } - - @Override - public RequestHandlers getRequestHandlers() { - return null; - } - }; - sharedGroup = sharedGroupFactory.getTransportGroup(); - - for (NewPlugin.ProfileSettings profileSettings : profileSettings) { - newPlugin.createServerBootstrap(profileSettings, sharedGroup); - newPlugin.bindServer(profileSettings); - } - newPlugin.testSendRequest(); - } -} diff --git a/src/main/java/NewPlugin.java b/src/main/java/NewPlugin.java deleted file mode 100644 index c72f364..0000000 --- a/src/main/java/NewPlugin.java +++ /dev/null @@ -1,959 +0,0 @@ - -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -/* - * Modifications Copyright OpenSearch Contributors. See - * GitHub history for details. - */ - - -import com.carrotsearch.hppc.IntHashSet; -import com.carrotsearch.hppc.IntSet; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.util.SetOnce; -import org.opensearch.bootstrap.BootstrapCheck; -import org.opensearch.bootstrap.BootstrapContext; -import org.opensearch.client.node.NodeClient; -import org.opensearch.cluster.node.DiscoveryNode; -import org.opensearch.cluster.service.ClusterService; -import org.opensearch.common.Strings; -import org.opensearch.common.breaker.CircuitBreaker; -import org.opensearch.common.collect.Tuple; -import org.opensearch.common.component.AbstractLifecycleComponent; -import org.opensearch.common.inject.Injector; -import org.opensearch.common.logging.NodeAndClusterIdStateListener; -import org.opensearch.common.settings.ClusterSettings; -import org.opensearch.common.network.NetworkAddress; -import org.opensearch.common.network.NetworkService; -import org.opensearch.common.settings.*; -import org.opensearch.common.transport.BoundTransportAddress; -import org.opensearch.common.transport.PortsRange; -import org.opensearch.common.transport.TransportAddress; -import org.opensearch.common.unit.ByteSizeValue; -import org.opensearch.common.util.BigArrays; -import org.opensearch.common.util.PageCacheRecycler; -import org.opensearch.common.xcontent.NamedXContentRegistry; -import org.opensearch.env.Environment; -import org.opensearch.env.NodeEnvironment; -import org.opensearch.indices.breaker.CircuitBreakerService; -import org.opensearch.indices.recovery.RecoverySettings; -import org.opensearch.node.NodeValidationException; -import org.opensearch.plugins.*; -import org.opensearch.repositories.Repository; -import org.opensearch.repositories.fs.FsRepository; -import org.opensearch.rest.RestController; -import org.opensearch.tasks.TaskManager; -import 
org.opensearch.threadpool.ExecutorBuilder; -import org.opensearch.threadpool.ThreadPool; -import org.opensearch.transport.*; - - -import java.io.BufferedWriter; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.charset.Charset; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.*; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; - -public abstract class NewPlugin extends AbstractLifecycleComponent implements Transport { - private static NamedXContentRegistry namedXContentRegistry; - private static RecoverySettings recoverySettings; - private static ClusterService clusterService; - private static Settings settings; - //private static S3Service service; - private static Environment env; - private static RestController restController; - private static NodeEnvironment nodeEnvironment; - private static LocalNodeFactory localNodeFactory; - private static Injector injector; - private static final AtomicInteger portGenerator = new AtomicInteger(); - protected static TaskManager taskManager; - private static final String CLIENT_TYPE = "node"; - private static DiscoveryNode localNode; - private static Environment initialEnvironment; - protected static Set profileSettings = getProfileSettings(Settings.builder().put("transport.profiles.test.port", "5555").put("transport.profiles.default.port", "3333").build()); - private static final Logger logger = LogManager.getLogger(IndependentPlugin.class); - private static final ReadWriteLock closeLock = 
new ReentrantReadWriteLock(); - private static final Map> serverChannels = newConcurrentMap(); - private static volatile BoundTransportAddress boundAddress; - private static final ConcurrentMap profileBoundAddresses = newConcurrentMap(); - public static final String TRANSPORT_TYPE_DEFAULT_KEY = "transport.type.default"; - public static final String TRANSPORT_TYPE_KEY = "transport.type"; - public static final Setting TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Setting.Property.NodeScope); - public static final Setting TRANSPORT_DEFAULT_TYPE_SETTING = Setting.simpleString( - TRANSPORT_TYPE_DEFAULT_KEY, - Setting.Property.NodeScope - ); - private static final Map> transportFactories = new HashMap<>(); - public static final Setting WRITE_PORTS_FILE_SETTING = Setting.boolSetting("node.portsfile", false, Setting.Property.NodeScope); - private NodeClient client; - final List> executorBuilders = getExecutorBuilders(settings); - final ThreadPool threadPool = new ThreadPool(Settings.EMPTY, executorBuilders.toArray(new ExecutorBuilder[0])); - - protected NewPlugin(NodeClient client) { - this.client = client; - } - - - public static final class ProfileSettings { - public final String profileName; - public final boolean tcpNoDelay; - public final boolean tcpKeepAlive; - public final int tcpKeepIdle; - public final int tcpKeepInterval; - public final int tcpKeepCount; - public final boolean reuseAddress; - public final ByteSizeValue sendBufferSize; - public final ByteSizeValue receiveBufferSize; - public final List bindHosts; - public final List publishHosts; - public final String portOrRange; - public final int publishPort; - public final boolean isDefaultProfile; - - - public ProfileSettings(Settings settings, String profileName) { - this.profileName = profileName; - isDefaultProfile = TransportSettings.DEFAULT_PROFILE.equals(profileName); - tcpKeepAlive = TransportSettings.TCP_KEEP_ALIVE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); 
- tcpKeepIdle = TransportSettings.TCP_KEEP_IDLE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - tcpKeepInterval = TransportSettings.TCP_KEEP_INTERVAL_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - tcpKeepCount = TransportSettings.TCP_KEEP_COUNT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - tcpNoDelay = TransportSettings.TCP_NO_DELAY_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - reuseAddress = TransportSettings.TCP_REUSE_ADDRESS_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - sendBufferSize = TransportSettings.TCP_SEND_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - receiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - List profileBindHosts = TransportSettings.BIND_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - bindHosts = (profileBindHosts.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : profileBindHosts); - publishHosts = TransportSettings.PUBLISH_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - Setting concretePort = TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(profileName); - if (concretePort.exists(settings) == false && isDefaultProfile == false) { - throw new IllegalStateException("profile [" + profileName + "] has no port configured"); - } - portOrRange = TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - publishPort = isDefaultProfile - ? 
TransportSettings.PUBLISH_PORT.get(settings) - : TransportSettings.PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); - } - } - - public void IndependentPlugin( - Environment env - //S3Service service, - ) { - // this.profileSettings = getProfileSettings(settings); - this.settings = settings; - this.clusterService = clusterService; - -// lifecycle = new Lifecycle(); - // lifecycle.moveToStarted(); - - client = new NodeClient(settings, threadPool); - Map factories = new HashMap<>(); - factories.put( - FsRepository.TYPE, - metadata -> new FsRepository(metadata, env, namedXContentRegistry, clusterService, recoverySettings) - ); -// S3RepositoryPlugin s3repo = new S3RepositoryPlugin(settings, service); -// Map newRepoTypes = s3repo.getRepositories( -// env, -// namedXContentRegistry, -// clusterService, -// recoverySettings -// ); -// for (Map.Entry entry : newRepoTypes.entrySet()) { -// if (factories.put(entry.getKey(), entry.getValue()) != null) { -// throw new IllegalArgumentException("Repository type [" + entry.getKey() + "] is already registered"); -// } -// } - } - - - public static Set getProfileSettings(Settings settings) { - HashSet profiles = new HashSet<>(); - boolean isDefaultSet = false; - for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { - profiles.add(new ProfileSettings(settings, profile)); - if (TransportSettings.DEFAULT_PROFILE.equals(profile)) { - isDefaultSet = true; - } - } - if (isDefaultSet == false) { - profiles.add(new ProfileSettings(settings, TransportSettings.DEFAULT_PROFILE)); - } - return Collections.unmodifiableSet(profiles); - } - - final static NetworkService networkService = new NetworkService( - getCustomNameResolvers(filterPlugins(DiscoveryPlugin.class)) - ); - - - protected static TcpServerChannel bind(String name, InetSocketAddress address) throws IOException { - return null; - } - - private static InetSocketAddress bindToPort(final String name, final InetAddress 
hostAddress, String port) { - PortsRange portsRange = new PortsRange(port); - final AtomicReference lastException = new AtomicReference<>(); - final AtomicReference boundSocket = new AtomicReference<>(); - closeLock.writeLock().lock(); - try { - // No need for locking here since Lifecycle objects can't move from STARTED to INITIALIZED -// if (lifecycle.initialized() == false && lifecycle.started() == false) { -// throw new IllegalStateException("transport has been stopped"); -// } - boolean success = portsRange.iterate(portNumber -> { - try { - TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); - serverChannels.computeIfAbsent(name, k -> new ArrayList<>()).add(channel); - boundSocket.set(channel.getLocalAddress()); - } catch (Exception e) { - lastException.set(e); - return false; - } - return true; - }); - if (!success) { - throw new BindTransportException( - "Failed to bind to " + NetworkAddress.format(hostAddress, portsRange), - lastException.get() - ); - } - } finally { - closeLock.writeLock().unlock(); - } - if (logger.isDebugEnabled()) { - logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); - } - - return boundSocket.get(); - } - - static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { - int publishPort = profileSettings.publishPort; - - // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress - if (publishPort < 0) { - for (InetSocketAddress boundAddress : boundAddresses) { - InetAddress boundInetAddress = boundAddress.getAddress(); - if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { - publishPort = boundAddress.getPort(); - break; - } - } - } - - // if no matching boundAddress found, check if there is a unique port for all bound addresses - if (publishPort < 0) { - final IntSet ports = new IntHashSet(); - for 
(InetSocketAddress boundAddress : boundAddresses) { - ports.add(boundAddress.getPort()); - } - if (ports.size() == 1) { - publishPort = ports.iterator().next().value; - } - } - - if (publishPort < 0) { - String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; - throw new BindTransportException( - "Failed to auto-resolve publish port" - + profileExplanation - + ", multiple bound addresses " - + boundAddresses - + " with distinct ports and none of them matched the publish address (" - + publishInetAddress - + "). " - + "Please specify a unique port by setting " - + TransportSettings.PORT.getKey() - + " or " - + TransportSettings.PUBLISH_PORT.getKey() - ); - } - return publishPort; - } - - private static BoundTransportAddress createBoundTransportAddress(ProfileSettings profileSettings, List boundAddresses) { - String[] boundAddressesHostStrings = new String[boundAddresses.size()]; - TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; - for (int i = 0; i < boundAddresses.size(); i++) { - InetSocketAddress boundAddress = boundAddresses.get(i); - boundAddressesHostStrings[i] = boundAddress.getHostString(); - transportBoundAddresses[i] = new TransportAddress(boundAddress); - } - - List publishHosts = profileSettings.publishHosts; - if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { - publishHosts = Arrays.asList(boundAddressesHostStrings); - } - if (publishHosts.isEmpty()) { - publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings); - } - - final InetAddress publishInetAddress; - try { - publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); - } catch (Exception e) { - throw new BindTransportException("Failed to resolve publish address", e); - } - - final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); - final TransportAddress 
publishAddress = new TransportAddress(new InetSocketAddress(publishInetAddress, publishPort)); - return new BoundTransportAddress(transportBoundAddresses, publishAddress); - } - -// protected static void bindServer(ProfileSettings profileSettings) { -// // Bind and start to accept incoming connections. -// logger.info("PROFILE", profileSettings); -// InetAddress[] hostAddresses; -// List profileBindHosts = profileSettings.bindHosts; -// try { -// hostAddresses = networkService.resolveBindHostAddresses(profileBindHosts.toArray(Strings.EMPTY_ARRAY)); -// } catch (IOException e) { -// throw new BindTransportException("Failed to resolve host " + profileBindHosts, e); -// } -// if (logger.isDebugEnabled()) { -// String[] addresses = new String[hostAddresses.length]; -// for (int i = 0; i < hostAddresses.length; i++) { -// addresses[i] = NetworkAddress.format(hostAddresses[i]); -// } -// logger.debug("binding server bootstrap to: {}", (Object) addresses); -// } -// -// assert hostAddresses.length > 0; -// -// List boundAddresses = new ArrayList<>(); -// for (InetAddress hostAddress : hostAddresses) { -// boundAddresses.add(bindToPort(profileSettings.profileName, hostAddress, profileSettings.portOrRange)); -// } -// -// final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); -// -// if (profileSettings.isDefaultProfile) { -// boundAddress = boundTransportAddress; -// } else { -// profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); -// } -// } - - public static ClusterSettings getClusterSettings() { - final Set> clusterSettingUpgraders = new HashSet<>(); - return new ClusterSettings(settings, new HashSet<>(), clusterSettingUpgraders); - } - - public static TransportAddress buildNewFakeTransportAddress() { - return new TransportAddress(TransportAddress.META_ADDRESS, portGenerator.incrementAndGet()); - } - - - - - - public static List filterPlugins(Class type) { - final List> plugins = new 
ArrayList<>(); - return plugins.stream().filter(x -> type.isAssignableFrom(x.v2().getClass())).map(p -> ((T) p.v2())).collect(Collectors.toList()); - } - - public static List getPluginSettingsFilter() { - final List> plugins = new ArrayList<>(); - return plugins.stream().flatMap(p -> p.v2().getSettingsFilter().stream()).collect(Collectors.toList()); - } - - protected static Optional getFeature() { - return Optional.empty(); - } - - public static List> getExecutorBuilders(Settings settings) { - final List> plugins = new ArrayList<>(); - final ArrayList> builders = new ArrayList<>(); - for (final Tuple plugin : plugins) { - builders.addAll(plugin.v2().getExecutorBuilders(settings)); - } - return builders; - } - - public static Settings settings() { - return env.settings(); - } - - private static List getCustomNameResolvers(List discoveryPlugins) { - List customNameResolvers = new ArrayList<>(); - for (DiscoveryPlugin discoveryPlugin : discoveryPlugins) { - NetworkService.CustomNameResolver customNameResolver = discoveryPlugin.getCustomNameResolver(settings()); - if (customNameResolver != null) { - customNameResolvers.add(customNameResolver); - } - } - return customNameResolvers; - } - - static BigArrays createBigArrays(PageCacheRecycler pageCacheRecycler, CircuitBreakerService circuitBreakerService) { - return new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.REQUEST); - } - - static PageCacheRecycler createPageCacheRecycler(Settings settings) { - return new PageCacheRecycler(settings); - } - - public static RestController getRestController() { - return restController; - } - - public TaskManager getTaskManager() { - return taskManager; - } - - public static Supplier getTransportSupplier() { - final String name; -// Settings settings = Settings.builder() -// .put(NetworkModule.HTTP_DEFAULT_TYPE_SETTING.getKey(), "default_custom") -// .put(NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.getKey(), "default_custom") -// .build(); - if 
(TRANSPORT_TYPE_SETTING.exists(settings)) { - name = TRANSPORT_TYPE_SETTING.get(settings); - } else { - name = TRANSPORT_DEFAULT_TYPE_SETTING.get(settings); - } - final Supplier factory = transportFactories.get(name); - if (factory == null) { - throw new IllegalStateException("Unsupported transport.type [" + name + "]"); - } - return factory; - } - -// protected static T getInstanceFromNode(Class clazz) { -// return NODE.injector().getInstance(clazz); -// } - - -// public static Settings updatedSettings() { -// final List> plugins = new ArrayList<>(); -// Map foundSettings = new HashMap<>(); -// final Map features = new TreeMap<>(); -// final Settings.Builder builder = Settings.builder(); -// for (Tuple plugin : plugins) { -// Settings settings = plugin.v2().additionalSettings(); -// for (String setting : settings.keySet()) { -// String oldPlugin = foundSettings.put(setting, plugin.v1().getName()); -// if (oldPlugin != null) { -// throw new IllegalArgumentException( -// "Cannot have additional setting [" -// + setting -// + "] " -// + "in plugin [" -// + plugin.v1().getName() -// + "], already added in plugin [" -// + oldPlugin -// + "]" -// ); -// } -// } -// builder.put(settings); -// final Optional maybeFeature = getFeature(); -// if (maybeFeature.isPresent()) { -// final String feature = maybeFeature.get(); -// if (features.containsKey(feature)) { -// final String message = String.format( -// Locale.ROOT, -// "duplicate feature [%s] in plugin [%s], already added in [%s]", -// feature, -// plugin.v1().getName(), -// features.get(feature) -// ); -// throw new IllegalArgumentException(message); -// } -// features.put(feature, plugin.v1().getName()); -// } -// } -// for (final String feature : features.keySet()) { -// builder.put(TransportSettings.FEATURE_PREFIX + "." 
+ feature, true); -// } -// try { -// return builder.put(IndependentPlugin.settings).build(); -// } catch(NullPointerException e) { -// logger.info("Null Pointer Exception"); -// } -// return settings; -// } - - - public static void main(String[] args) throws IOException { - //System.out.println("Hello from main"); - logger.info("From main"); -// -// // ----- Dependency on PluginsService --- -// //final Settings settings = pluginsService.updatedSettings(); -// -// // final Setting NODE_DATA_SETTING = Setting.boolSetting( -// // "node.data", -// // true, -// // Setting.Property.Deprecated, -// // Setting.Property.NodeScope -// // ); -// -// // final Setting NODE_MASTER_SETTING = Setting.boolSetting( -// // "node.master", -// // true, -// // Setting.Property.Deprecated, -// // Setting.Property.NodeScope -// // ); -// -// // final Setting NODE_INGEST_SETTING = Setting.boolSetting( -// // "node.ingest", -// // true, -// // Setting.Property.Deprecated, -// // Setting.Property.NodeScope -// // ); -// -// // final Setting NODE_REMOTE_CLUSTER_CLIENT = Setting.boolSetting( -// // "node.remote_cluster_client", -// // RemoteClusterService.ENABLE_REMOTE_CLUSTERS, -// // Setting.Property.Deprecated, -// // Setting.Property.NodeScope -// // ); -// -// final List> additionalSettings = new ArrayList<>(); -// //register the node.data, node.ingest, node.master, node.remote_cluster_client settings here so we can mark them private -//// additionalSettings.add(NODE_DATA_SETTING); -//// additionalSettings.add(NODE_INGEST_SETTING); -//// additionalSettings.add(NODE_MASTER_SETTING); -//// additionalSettings.add(NODE_REMOTE_CLUSTER_CLIENT); -// -// -// //----- Dependency on PluginsService --- -// final Set> settingsUpgraders = filterPlugins(Plugin.class) -// .stream() -// .map(Plugin::getSettingUpgraders) -// .flatMap(List::stream) -// .collect(Collectors.toSet()); -// -// //final Settings settings = updatedSettings(); -// File homeDir = null; -// try { -// homeDir = 
File.createTempFile("temp", Long.toString(System.nanoTime())); -// } catch (IOException e) { -// e.printStackTrace(); -// } -// Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), String.valueOf(homeDir)).build(); -// // ----- Dependency on PluginsService --- -// final List additionalSettingsFilter = new ArrayList<>(getPluginSettingsFilter()); -// -// final SettingsModule settingsModule = new SettingsModule( -// settings, -// additionalSettings, -// additionalSettingsFilter, -// settingsUpgraders -// ); -// -// -// //final RecoverySettings recoverySettings = new RecoverySettings(settings, getClusterSettings()); -// -// //----- Dependency on PluginsService --- -// SearchModule searchModule = new SearchModule(settings, filterPlugins(SearchPlugin.class)); -// -// // ----- Dependency on PluginsService --- -// NamedXContentRegistry xContentRegistry = new NamedXContentRegistry( -// Stream.of( -// NetworkModule.getNamedXContents().stream(), -// IndicesModule.getNamedXContents().stream(), -// searchModule.getNamedXContents().stream(), -// filterPlugins(Plugin.class).stream().flatMap(p -> p.getNamedXContent().stream()), -// ClusterModule.getNamedXWriteables().stream() -// ).flatMap(Function.identity()).collect(toList()) -// ); -// -// -//// final ClusterService clusterService = injector.getInstance(ClusterService.class); -// // final ClusterService clusterService = getInstanceFromNode(ClusterService.class); -// -// //final Environment initialEnvironment = (Environment) System.getenv(); -// -// //Environment environment = new Environment(settings, initialEnvironment.configFile(), Node.NODE_LOCAL_STORAGE_SETTING.get(settings)); -// -// //IndependentPlugin Indplug = new IndependentPlugin(environment, clusterService, xContentRegistry, recoverySettings, settings, profileSettings); -//// S3Service service = new S3Service(); -//// IndependentPlugin Indplug = new IndependentPlugin(environment, clusterService, xContentRegistry, recoverySettings, 
settings, service, profileSettings) { -//// @Override -//// protected void doStart() { -//// -//// } -//// -//// @Override -//// protected void doStop() { -//// -//// } -//// -//// @Override -//// protected void doClose() throws IOException { -//// -//// } -//// }; -// -// -// //Transport Service -// -// //----- Dependency on PluginsService --- -// final List> executorBuilders = getExecutorBuilders(settings); -// final ThreadPool threadPool = new ThreadPool(settings, executorBuilders.toArray(new ExecutorBuilder[0])); -// List pluginCircuitBreakers = filterPlugins(CircuitBreakerPlugin.class) -// .stream() -// .map(plugin -> plugin.getCircuitBreaker(settings)) -// .collect(toList()); -// -// final CircuitBreakerService circuitBreakerService = createCircuitBreakerService( -// settingsModule.getSettings(), -// pluginCircuitBreakers, -// settingsModule.getClusterSettings() -// ); -// -// PageCacheRecycler pageCacheRecycler = createPageCacheRecycler(settings); -// BigArrays bigArrays = createBigArrays(pageCacheRecycler, circuitBreakerService); -// IndicesModule indicesModule = new IndicesModule(filterPlugins(MapperPlugin.class)); -// -// List namedWriteables = Stream.of( -// NetworkModule.getNamedWriteables().stream(), -// indicesModule.getNamedWriteables().stream(), -// searchModule.getNamedWriteables().stream(), -// filterPlugins(Plugin.class).stream().flatMap(p -> p.getNamedWriteables().stream()), -// ClusterModule.getNamedWriteables().stream() -// ).flatMap(Function.identity()).collect(toList()); -// -// final NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(namedWriteables); -// -// final NetworkService networkService = new NetworkService( -// getCustomNameResolvers(filterPlugins(DiscoveryPlugin.class)) -// ); -// -// -// final RestController restController = getRestController(); -// -// -// final NetworkModule networkModule = new NetworkModule( -// settings, -// filterPlugins(NetworkPlugin.class), -// threadPool, -// bigArrays, -// 
pageCacheRecycler, -// circuitBreakerService, -// namedWriteableRegistry, -// xContentRegistry, -// networkService, -// restController, -// new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS) -// ); -// -// // final Transport transport = networkModule.getTransportSupplier().get(); -// Set taskHeaders = Stream.concat( -// filterPlugins(ActionPlugin.class).stream().flatMap(p -> p.getTaskHeaders().stream()), -// Stream.of(Task.X_OPAQUE_ID) -// ).collect(Collectors.toSet()); -// -// //Settings tmpSettings = Settings.builder() -// // .put(initialEnvironment.settings()) -// // .put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE) -// // .build(); -// //nodeEnvironment = new NodeEnvironment(tmpSettings, env); -// //localNodeFactory = new LocalNodeFactory(settings, nodeEnvironment.nodeId()); -// final TransportService transportService = new TransportService( -// settings, -// null, -// threadPool, -// networkModule.getTransportInterceptor(), -// null, -// settingsModule.getClusterSettings(), -// taskHeaders -// ); -// ModulesBuilder modules = new ModulesBuilder(); -// injector = modules.createInjector(); -// //TransportService transportService = injector.getInstance(TransportService.class); -// transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); -// transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); -// transportService.start(); -// transportService.acceptIncomingRequests(); - - -// for (ProfileSettings profileSettings : profileSettings) { -// bindServer(profileSettings); -// } - - // doStart(); - - - } - -// private static TransportService newTransportService(Settings settings, Transport transport, ThreadPool threadPool, TransportInterceptor transportInterceptor, LocalNodeFactory localNodeFactory, ClusterSettings clusterSettings, Set taskHeaders) { -// return new TransportService(settings, transport, threadPool, transportInterceptor, 
localNodeFactory, clusterSettings, taskHeaders); -// } - - - - - // Start the transport service now so the publish address will be added to the local disco node in ClusterService -// TransportService transportService = injector.getInstance(TransportService.class); -// transportService.start(); - - -// public static void main(String[] args ) { -// S3RepositoryPlugin s3repo = new S3RepositoryPlugin(settings, service); -// Map newRepoTypes = s3repo.getRepositories( -// env, -// namedXContentRegistry, -// clusterService, -// recoverySettings -// ); -// } - -// public IndependentPlugin start() throws NodeValidationException { -// if (!lifecycle.moveToStarted()) { -// return this; -// } -// -// logger.info("starting ..."); -// //pluginLifecycleComponents.forEach(LifecycleComponent::start); -// -//// injector.getInstance(MappingUpdatedAction.class).setClient(client); -//// injector.getInstance(IndicesService.class).start(); -//// injector.getInstance(IndicesClusterStateService.class).start(); -//// injector.getInstance(SnapshotsService.class).start(); -//// injector.getInstance(SnapshotShardsService.class).start(); -//// injector.getInstance(RepositoriesService.class).start(); -//// injector.getInstance(SearchService.class).start(); -//// injector.getInstance(FsHealthService.class).start(); -// //nodeService.getMonitorService().start(); -// -// //final ClusterService clusterService = injector.getInstance(ClusterService.class); -// -// final NodeConnectionsService nodeConnectionsService = injector.getInstance(NodeConnectionsService.class); -// nodeConnectionsService.start(); -//// clusterService.setNodeConnectionsService(nodeConnectionsService); -//// -//// injector.getInstance(GatewayService.class).start(); -// Discovery discovery = injector.getInstance(Discovery.class); -//// clusterService.getMasterService().setClusterStatePublisher(discovery::publish); -// -// // Start the transport service now so the publish address will be added to the local disco node in 
ClusterService -// TransportService transportService = injector.getInstance(TransportService.class); -// transportService.getTaskManager().setTaskResultsService(injector.getInstance(TaskResultsService.class)); -// transportService.getTaskManager().setTaskCancellationService(new TaskCancellationService(transportService)); -// transportService.start(); -// assert localNodeFactory.getNode() != null; -// assert transportService.getLocalNode() -// .equals(localNodeFactory.getNode()) : "transportService has a different local node than the factory provided"; -// injector.getInstance(PeerRecoverySourceService.class).start(); -// -// // Load (and maybe upgrade) the metadata stored on disk -// final GatewayMetaState gatewayMetaState = injector.getInstance(GatewayMetaState.class); -// gatewayMetaState.start( -// settings(), -// transportService, -// null, -// injector.getInstance(MetaStateService.class), -// injector.getInstance(MetadataIndexUpgradeService.class), -// injector.getInstance(MetadataUpgrader.class), -// injector.getInstance(PersistedClusterStateService.class) -// ); -// if (Assertions.ENABLED) { -// try { -// assert injector.getInstance(MetaStateService.class).loadFullState().v1().isEmpty(); -// final NodeMetadata nodeMetadata = NodeMetadata.FORMAT.loadLatestState( -// logger, -// NamedXContentRegistry.EMPTY, -// nodeEnvironment.nodeDataPaths() -// ); -// assert nodeMetadata != null; -// assert nodeMetadata.nodeVersion().equals(Version.CURRENT); -// assert nodeMetadata.nodeId().equals(localNodeFactory.getNode().getId()); -// } catch (IOException e) { -// assert false : e; -// } -// } -// // we load the global state here (the persistent part of the cluster state stored on disk) to -// // pass it to the bootstrap checks to allow plugins to enforce certain preconditions based on the recovered state. 
-// final Metadata onDiskMetadata = gatewayMetaState.getPersistedState().getLastAcceptedState().metadata(); -// assert onDiskMetadata != null : "metadata is null but shouldn't"; // this is never null -// validateNodeBeforeAcceptingRequests( -// new BootstrapContext(env, onDiskMetadata), -// transportService.boundAddress(), -// filterPlugins(Plugin.class).stream().flatMap(p -> p.getBootstrapChecks().stream()).collect(Collectors.toList()) -// ); -// -// clusterService.addStateApplier(transportService.getTaskManager()); -// // start after transport service so the local disco is known -// discovery.start(); // start before cluster service so that it can set initial state on ClusterApplierService -// clusterService.start(); -// assert clusterService.localNode() -// .equals(localNodeFactory.getNode()) : "clusterService has a different local node than the factory provided"; -// transportService.acceptIncomingRequests(); -// discovery.startInitialJoin(); -// final TimeValue initialStateTimeout = Node.DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings()); -// configureNodeAndClusterIdStateListener(clusterService); -// -// if (initialStateTimeout.millis() > 0) { -// final ThreadPool thread = injector.getInstance(ThreadPool.class); -// ClusterState clusterState = clusterService.state(); -// ClusterStateObserver observer = new ClusterStateObserver(clusterState, clusterService, null, logger, thread.getThreadContext()); -// -// if (clusterState.nodes().getMasterNodeId() == null) { -// logger.debug("waiting to join the cluster. 
timeout [{}]", initialStateTimeout); -// final CountDownLatch latch = new CountDownLatch(1); -// observer.waitForNextChange(new ClusterStateObserver.Listener() { -// @Override -// public void onNewClusterState(ClusterState state) { -// latch.countDown(); -// } -// -// @Override -// public void onClusterServiceClose() { -// latch.countDown(); -// } -// -// @Override -// public void onTimeout(TimeValue timeout) { -// logger.warn("timed out while waiting for initial discovery state - timeout: {}", initialStateTimeout); -// latch.countDown(); -// } -// }, state -> state.nodes().getMasterNodeId() != null, initialStateTimeout); -// -// try { -// latch.await(); -// } catch (InterruptedException e) { -// throw new OpenSearchTimeoutException("Interrupted while waiting for initial discovery state"); -// } -// } -// } -// -// injector.getInstance(HttpServerTransport.class).start(); -// -// if (WRITE_PORTS_FILE_SETTING.get(settings())) { -// TransportService transport = injector.getInstance(TransportService.class); -// writePortsFile("transport", transport.boundAddress()); -// HttpServerTransport http = injector.getInstance(HttpServerTransport.class); -// writePortsFile("http", http.boundAddress()); -// } -// -// logger.info("started"); -// -// filterPlugins(ClusterPlugin.class).forEach(ClusterPlugin::onNodeStarted); -// -// return this; -// } - - private static class LocalNodeFactory implements Function { - private final SetOnce localNode = new SetOnce<>(); - private final String persistentNodeId; - private final Settings settings; - - private LocalNodeFactory(Settings settings, String persistentNodeId) { - this.persistentNodeId = persistentNodeId; - this.settings = settings; - } - - @Override - public DiscoveryNode apply(BoundTransportAddress boundTransportAddress) { - localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); - return localNode.get(); - } - - DiscoveryNode getNode() { - assert localNode.get() != null; - 
return localNode.get(); - } - } - - protected void validateNodeBeforeAcceptingRequests( - final BootstrapContext context, - final BoundTransportAddress boundTransportAddress, - List bootstrapChecks - ) throws NodeValidationException {} - - protected void configureNodeAndClusterIdStateListener(ClusterService clusterService) { - NodeAndClusterIdStateListener.getAndSetNodeIdAndClusterId( - clusterService, - injector.getInstance(ThreadPool.class).getThreadContext() - ); - } - - private void writePortsFile(String type, BoundTransportAddress boundAddress) { - Path tmpPortsFile = env.logsFile().resolve(type + ".ports.tmp"); - try (BufferedWriter writer = Files.newBufferedWriter(tmpPortsFile, Charset.forName("UTF-8"))) { - for (TransportAddress address : boundAddress.boundAddresses()) { - InetAddress inetAddress = InetAddress.getByName(address.getAddress()); - writer.write(NetworkAddress.format(new InetSocketAddress(inetAddress, address.getPort())) + "\n"); - } - } catch (IOException e) { - throw new RuntimeException("Failed to write ports file", e); - } - Path portsFile = env.logsFile().resolve(type + ".ports"); - try { - Files.move(tmpPortsFile, portsFile, StandardCopyOption.ATOMIC_MOVE); - } catch (IOException e) { - throw new RuntimeException("Failed to rename ports file", e); - } - } - - -} - - -//import org.opensearch.common.transport.TransportAddress; -//import org.apache.logging.log4j.Logger; -//import org.apache.logging.log4j.LogManager; -//import java.util.concurrent.atomic.AtomicInteger; -// -//public class IndependentPlugin { -// public IndependentPlugin() { -// System.out.println("Constructor"); -// } -// private static final AtomicInteger portGenerator = new AtomicInteger(); -// private static final Logger logger = LogManager.getLogger(IndependentPlugin.class); -// -// -//// public static TransportAddress buildNewFakeTransportAddress() { -//// return new TransportAddress(TransportAddress.META_ADDRESS, portGenerator.incrementAndGet()); -//// } -// -// public 
static void main(String[] args) { -// -// System.out.println("Main"); -// } -//} diff --git a/src/main/java/Opensearch.java b/src/main/java/Opensearch.java deleted file mode 100644 index 36bcd76..0000000 --- a/src/main/java/Opensearch.java +++ /dev/null @@ -1,12 +0,0 @@ -import org.opensearch.node.NodeValidationException; - -import java.io.IOException; - -public class Opensearch { - private static volatile Bootstrap INSTANCE; - - public static void main(String[] args) throws NodeValidationException, IOException { - INSTANCE = new Bootstrap(); - INSTANCE.start(); - } -} diff --git a/src/main/java/SimpleMockNioTransportTests.java b/src/main/java/SimpleMockNioTransportTests.java deleted file mode 100644 index 26806e0..0000000 --- a/src/main/java/SimpleMockNioTransportTests.java +++ /dev/null @@ -1,117 +0,0 @@ -///* -// * SPDX-License-Identifier: Apache-2.0 -// * -// * The OpenSearch Contributors require contributions made to -// * this file be licensed under the Apache-2.0 license or a -// * compatible open source license. -// */ -// -///* -// * Licensed to Elasticsearch under one or more contributor -// * license agreements. See the NOTICE file distributed with -// * this work for additional information regarding copyright -// * ownership. Elasticsearch licenses this file to you under -// * the Apache License, Version 2.0 (the "License"); you may -// * not use this file except in compliance with the License. -// * You may obtain a copy of the License at -// * -// * http://www.apache.org/licenses/LICENSE-2.0 -// * -// * Unless required by applicable law or agreed to in writing, -// * software distributed under the License is distributed on an -// * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// * KIND, either express or implied. See the License for the -// * specific language governing permissions and limitations -// * under the License. -// */ -// -///* -// * Modifications Copyright OpenSearch Contributors. See -// * GitHub history for details. 
-// */ -// -// -// -//import org.opensearch.Version; -//import org.opensearch.action.ActionListener; -//import org.opensearch.cluster.node.DiscoveryNode; -//import org.opensearch.common.io.stream.NamedWriteableRegistry; -//import org.opensearch.common.network.NetworkService; -//import org.opensearch.common.settings.ClusterSettings; -//import org.opensearch.common.settings.Settings; -//import org.opensearch.common.transport.TransportAddress; -//import org.opensearch.common.util.MockPageCacheRecycler; -//import org.opensearch.indices.breaker.NoneCircuitBreakerService; -//import org.opensearch.transport.AbstractSimpleTransportTestCase; -//import org.opensearch.transport.ConnectTransportException; -//import org.opensearch.transport.ConnectionProfile; -//import org.opensearch.transport.TcpChannel; -//import org.opensearch.transport.Transport; -// -//import java.io.IOException; -//import java.net.InetAddress; -//import java.net.UnknownHostException; -//import java.util.Collections; -// -//import static java.util.Collections.emptyMap; -//import static java.util.Collections.emptySet; -//import static org.hamcrest.Matchers.containsString; -//import static org.hamcrest.Matchers.instanceOf; -// -//public class SimpleMockNioTransportTests extends AbstractSimpleTransportTestCase { -// -// @Override -// protected Transport build(Settings settings, final Version version, ClusterSettings clusterSettings, boolean doHandshake) { -// NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(Collections.emptyList()); -// NetworkService networkService = new NetworkService(Collections.emptyList()); -// return new MockNioTransport( -// settings, -// version, -// threadPool, -// networkService, -// new MockPageCacheRecycler(settings), -// namedWriteableRegistry, -// new NoneCircuitBreakerService() -// ) { -// -// @Override -// public void executeHandshake( -// DiscoveryNode node, -// TcpChannel channel, -// ConnectionProfile profile, -// ActionListener listener -// ) { 
-// if (doHandshake) { -// super.executeHandshake(node, channel, profile, listener); -// } else { -// listener.onResponse(version.minimumCompatibilityVersion()); -// } -// } -// }; -// } -// -// @Override -// protected int channelsPerNodeConnection() { -// return 3; -// } -// -// public void testConnectException() throws UnknownHostException { -// try { -// serviceA.connectToNode( -// new DiscoveryNode( -// "C", -// new TransportAddress(InetAddress.getByName("localhost"), 9876), -// emptyMap(), -// emptySet(), -// Version.CURRENT -// ) -// ); -// fail("Expected ConnectTransportException"); -// } catch (ConnectTransportException e) { -// assertThat(e.getMessage(), containsString("connect_exception")); -// assertThat(e.getMessage(), containsString("[127.0.0.1:9876]")); -// Throwable cause = e.getCause(); -// assertThat(cause, instanceOf(IOException.class)); -// } -// } -//} diff --git a/src/main/java/Bootstrap.java b/src/main/java/transportservice/Bootstrap.java similarity index 85% rename from src/main/java/Bootstrap.java rename to src/main/java/transportservice/Bootstrap.java index 6805c14..6704e5d 100644 --- a/src/main/java/Bootstrap.java +++ b/src/main/java/transportservice/Bootstrap.java @@ -1,21 +1,18 @@ +package transportservice; + import org.opensearch.Version; -import org.opensearch.bootstrap.BootstrapCheck; -import org.opensearch.bootstrap.BootstrapContext; import org.opensearch.common.settings.Settings; -import org.opensearch.common.transport.BoundTransportAddress; import org.opensearch.env.Environment; -import org.opensearch.node.Node; import org.opensearch.node.NodeValidationException; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; -import java.util.List; import java.util.concurrent.CountDownLatch; public class Bootstrap { - private volatile IndependentPlugin node; + private final Thread keepAliveThread; private final CountDownLatch keepAliveLatch = new CountDownLatch(1); @@ -69,9 +66,6 @@ 
public void run() { // }; } - void start() throws NodeValidationException { - node.start(); - keepAliveThread.start(); - } + } diff --git a/src/main/java/CopyBytesServerSocketChannel.java b/src/main/java/transportservice/CopyBytesServerSocketChannel.java similarity index 92% rename from src/main/java/CopyBytesServerSocketChannel.java rename to src/main/java/transportservice/CopyBytesServerSocketChannel.java index 303eabb..7dfce8a 100644 --- a/src/main/java/CopyBytesServerSocketChannel.java +++ b/src/main/java/transportservice/CopyBytesServerSocketChannel.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to @@ -25,9 +25,9 @@ * under the License. */ /* - * Copyright 2012 The Netty Project + * Copyright 2012 The transportservice.Netty Project * - * The Netty Project licenses this file to you under the Apache License, + * The transportservice.Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * @@ -53,7 +53,7 @@ import java.util.List; /** - * This class is adapted from {@link NioServerSocketChannel} class in the Netty project. It overrides the + * This class is adapted from {@link NioServerSocketChannel} class in the transportservice.netty4.Netty project. It overrides the * channel read messages behavior to ensure that a {@link CopyBytesSocketChannel} socket channel is created. 
*/ public class CopyBytesServerSocketChannel extends NioServerSocketChannel { diff --git a/src/main/java/CopyBytesSocketChannel.java b/src/main/java/transportservice/CopyBytesSocketChannel.java similarity index 96% rename from src/main/java/CopyBytesSocketChannel.java rename to src/main/java/transportservice/CopyBytesSocketChannel.java index af24b38..b9f5970 100644 --- a/src/main/java/CopyBytesSocketChannel.java +++ b/src/main/java/transportservice/CopyBytesSocketChannel.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to @@ -25,9 +25,9 @@ * under the License. */ /* - * Copyright 2012 The Netty Project + * Copyright 2012 The transportservice.netty4.Netty Project * - * The Netty Project licenses this file to you under the Apache License, + * The transportservice.Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * @@ -59,7 +59,7 @@ import static io.netty.channel.internal.ChannelUtils.MAX_BYTES_PER_GATHERING_WRITE_ATTEMPTED_LOW_THRESHOLD; /** - * This class is adapted from {@link NioSocketChannel} class in the Netty project. It overrides the channel + * This class is adapted from {@link NioSocketChannel} class in the transportservice.Netty project. It overrides the channel * read/write behavior to ensure that the bytes are always copied to a thread-local direct bytes buffer. This * happens BEFORE the call to the Java {@link SocketChannel} is issued. 
* @@ -72,7 +72,7 @@ public class CopyBytesSocketChannel extends Netty4NioSocketChannel { private static final int MAX_BYTES_PER_WRITE = StrictMath.toIntExact( - ByteSizeValue.parseBytesSizeValue(System.getProperty("opensearch.transport.buffer.size", "1m"), "opensearch.transport.buffer.size") + ByteSizeValue.parseBytesSizeValue(System.getProperty("opensearch.transportservice.transport.buffer.size", "1m"), "opensearch.transportservice.transport.buffer.size") .getBytes() ); diff --git a/src/main/java/Netty4NioSocketChannel.java b/src/main/java/transportservice/Netty4NioSocketChannel.java similarity index 98% rename from src/main/java/Netty4NioSocketChannel.java rename to src/main/java/transportservice/Netty4NioSocketChannel.java index 0920837..da9d3c7 100644 --- a/src/main/java/Netty4NioSocketChannel.java +++ b/src/main/java/transportservice/Netty4NioSocketChannel.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to diff --git a/src/main/java/NettyAllocator.java b/src/main/java/transportservice/NettyAllocator.java similarity index 99% rename from src/main/java/NettyAllocator.java rename to src/main/java/transportservice/NettyAllocator.java index 6f12d9e..9757f8c 100644 --- a/src/main/java/NettyAllocator.java +++ b/src/main/java/transportservice/NettyAllocator.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to diff --git a/src/main/java/NettyByteBufSizer.java b/src/main/java/transportservice/NettyByteBufSizer.java similarity index 98% rename from src/main/java/NettyByteBufSizer.java rename to src/main/java/transportservice/NettyByteBufSizer.java index d761807..2d0c973 100644 --- a/src/main/java/NettyByteBufSizer.java +++ b/src/main/java/transportservice/NettyByteBufSizer.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The 
OpenSearch Contributors require contributions made to diff --git a/src/main/java/OpenSearchLoggingHandler.java b/src/main/java/transportservice/OpenSearchLoggingHandler.java similarity index 98% rename from src/main/java/OpenSearchLoggingHandler.java rename to src/main/java/transportservice/OpenSearchLoggingHandler.java index 91f44ec..73a73de 100644 --- a/src/main/java/OpenSearchLoggingHandler.java +++ b/src/main/java/transportservice/OpenSearchLoggingHandler.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to diff --git a/src/main/java/transportservice/RunPlugin.java b/src/main/java/transportservice/RunPlugin.java new file mode 100644 index 0000000..192e7dc --- /dev/null +++ b/src/main/java/transportservice/RunPlugin.java @@ -0,0 +1,164 @@ +package transportservice; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.indices.breaker.NoneCircuitBreakerService; +import org.opensearch.search.SearchModule; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.*; +import transportservice.netty4.Netty; +import transportservice.transport.ClusterConnectionManager; +import transportservice.transport.ConnectionManager; + +import java.io.IOException; +import 
java.io.UncheckedIOException; +import java.net.*; +import java.util.Collections; +import java.util.function.Function; + +import static java.util.Collections.emptyMap; +import static java.util.Collections.emptySet; + +public class RunPlugin { + + private static final Settings settings = Settings.builder() + .put("node.name", "NettySizeHeaderFrameDecoderTests") + .put(TransportSettings.BIND_HOST.getKey(), "127.0.0.1") + .put(TransportSettings.PORT.getKey(), "0") + .build(); + private static final Logger logger = LogManager.getLogger(RunPlugin.class); + private static LocalNodeFactory localNodeFactory = null; + public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() { + }; + private static final Version CURRENT_VERSION = Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"); + protected static final Version version0 = CURRENT_VERSION.minimumCompatibilityVersion(); + + public RunPlugin(LocalNodeFactory localNodeFactory) { + // DUMMY VALUE + this.localNodeFactory = new LocalNodeFactory(settings, "5"); + } + + @SuppressForbidden(reason = "need local ephemeral port") + protected static InetSocketAddress getLocalEphemeral() throws UnknownHostException { + return new InetSocketAddress(InetAddress.getLocalHost(), 0); + } + + public static void main(String[] args) { + + ThreadPool threadPool = new TestThreadPool("test"); + NetworkService networkService = new NetworkService(Collections.emptyList()); + PageCacheRecycler pageCacheRecycler = new PageCacheRecycler(settings); + NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()); + final CircuitBreakerService circuitBreakerService = new NoneCircuitBreakerService(); + + + Netty transport = new Netty( + settings, + Version.CURRENT, + threadPool, + networkService, + pageCacheRecycler, + namedWriteableRegistry, + circuitBreakerService, + new SharedGroupFactory(settings) + ); + + 
final ConnectionManager connectionManager = new ClusterConnectionManager(settings, transport); + + + + final TransportService transportService = new TransportService( + transport, + connectionManager, + transport.getResponseHandlers(), + threadPool, + localNodeFactory, + NOOP_TRANSPORT_INTERCEPTOR + ); + +// connectionManager.addListener(transportService); + + transportService.start(); + transportService.acceptIncomingRequests(); + + // Action Listener + + boolean flag = true; + try (ServerSocket socket = new ServerSocket()) { + socket.bind(getLocalEphemeral(), 1); + socket.setReuseAddress(true); + DiscoveryNode dummy = new DiscoveryNode( + "TEST", + new TransportAddress(socket.getInetAddress(), socket.getLocalPort()), + emptyMap(), + emptySet(), + version0 + ); + Thread t = new Thread() { + @Override + public void run() { + try (Socket accept = socket.accept()) { + if (flag) { // sometimes wait until the other side sends the message + accept.getInputStream().read(); + } + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + }; + t.start(); + ConnectionProfile.Builder builder = new ConnectionProfile.Builder(); + builder.addConnections( + 1, + TransportRequestOptions.Type.BULK, + TransportRequestOptions.Type.PING, + TransportRequestOptions.Type.RECOVERY, + TransportRequestOptions.Type.REG, + TransportRequestOptions.Type.STATE + ); + builder.setHandshakeTimeout(TimeValue.timeValueHours(1)); + //transportService.connectToNode(dummy, builder.build()); + t.join(); + + } catch (IOException e) { + e.printStackTrace(); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + + private static class LocalNodeFactory implements Function { + private final SetOnce localNode = new SetOnce<>(); + private final String persistentNodeId; + private final Settings settings; + + private LocalNodeFactory(Settings settings, String persistentNodeId) { + this.persistentNodeId = persistentNodeId; + this.settings = settings; + } + + @Override + public 
DiscoveryNode apply(BoundTransportAddress boundTransportAddress) { + localNode.set(DiscoveryNode.createLocal(settings, boundTransportAddress.publishAddress(), persistentNodeId)); + return localNode.get(); + } + + DiscoveryNode getNode() { + assert localNode.get() != null; + return localNode.get(); + } + } + +} diff --git a/src/main/java/SharedGroupFactory.java b/src/main/java/transportservice/SharedGroupFactory.java similarity index 97% rename from src/main/java/SharedGroupFactory.java rename to src/main/java/transportservice/SharedGroupFactory.java index 0cc7812..af3036e 100644 --- a/src/main/java/SharedGroupFactory.java +++ b/src/main/java/transportservice/SharedGroupFactory.java @@ -1,4 +1,4 @@ -/* +package transportservice;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to @@ -38,6 +38,7 @@ import org.apache.logging.log4j.Logger; import org.opensearch.common.settings.Settings; import org.opensearch.common.util.concurrent.AbstractRefCounted; +import transportservice.netty4.Netty; import java.util.concurrent.TimeUnit; @@ -57,7 +58,7 @@ public final class SharedGroupFactory { public SharedGroupFactory(Settings settings) { this.settings = settings; - this.workerCount = IndependentPlugin.WORKER_COUNT.get(settings); + this.workerCount = Netty.WORKER_COUNT.get(settings); } public Settings getSettings() { diff --git a/src/main/java/transportservice/TcpTransport.java b/src/main/java/transportservice/TcpTransport.java new file mode 100644 index 0000000..5a04932 --- /dev/null +++ b/src/main/java/transportservice/TcpTransport.java @@ -0,0 +1,730 @@ +package transportservice; + +import com.carrotsearch.hppc.IntHashSet; +import com.carrotsearch.hppc.IntSet; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.OpenSearchException; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; 
+import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Booleans; +import org.opensearch.common.Strings; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.metrics.MeanMetric; +import org.opensearch.common.network.CloseableChannel; +import org.opensearch.common.network.NetworkAddress; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.PortsRange; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.util.BigArrays; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.monitor.jvm.JvmInfo; +import org.opensearch.node.Node; +import org.opensearch.rest.RestStatus; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.*; +import transportservice.component.AbstractLifecycleComponent; +import transportservice.transport.ConnectionProfile; +import transportservice.transport.InboundHandler; +import transportservice.transport.OutboundHandler; +import transportservice.transport.Transport; +import transportservice.transport.TransportHandshaker; +import transportservice.transport.TransportKeepAlive; + +import java.io.IOException; +import java.io.StreamCorruptedException; +import java.net.BindException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import 
java.net.UnknownHostException; +import java.nio.channels.CancelledKeyException; +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Supplier; +import transportservice.transport.TcpChannel; +import transportservice.transport.InboundMessage; +import transportservice.transport.Transport.ResponseHandlers; + + +import static java.util.Collections.unmodifiableMap; +import static org.opensearch.common.transport.NetworkExceptionHelper.isCloseConnectionException; +import static org.opensearch.common.transport.NetworkExceptionHelper.isConnectException; +import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +public abstract class TcpTransport extends AbstractLifecycleComponent implements Transport { + + private static final Logger logger = LogManager.getLogger(TcpTransport.class); + // This is the number of bytes necessary to read the message size + private static final int BYTES_NEEDED_FOR_MESSAGE_SIZE = TcpHeader.MARKER_BYTES_SIZE + TcpHeader.MESSAGE_LENGTH_SIZE; + private static final long THIRTY_PER_HEAP_SIZE = (long) (JvmInfo.jvmInfo().getMem().getHeapMax().getBytes() * 0.3); + protected final Settings settings; + private final Version version; + protected final ThreadPool threadPool; + protected final PageCacheRecycler pageCacheRecycler; + protected final NetworkService networkService; + protected final Set profileSettings; + private final CircuitBreakerService circuitBreakerService; + private volatile BoundTransportAddress boundAddress; + + private final ConcurrentMap profileBoundAddresses = newConcurrentMap(); + private final Map> serverChannels = 
newConcurrentMap(); + private final Set acceptedChannels = ConcurrentCollections.newConcurrentSet(); + private final OutboundHandler outboundHandler; + private final InboundHandler inboundHandler; + private final TransportKeepAlive keepAlive; + private final TransportHandshaker handshaker; + private final transportservice.transport.Transport.ResponseHandlers responseHandlers = new transportservice.transport.Transport.ResponseHandlers(); + private final transportservice.transport.Transport.RequestHandlers requestHandlers = new transportservice.transport.Transport.RequestHandlers(); + + + private final ReadWriteLock closeLock = new ReentrantReadWriteLock(); + final StatsTracker statsTracker = new StatsTracker(); + private final AtomicLong outboundConnectionCount = new AtomicLong(); // also used as a correlation ID for open/close logs + + + public TcpTransport( + Settings settings, + Version version, + ThreadPool threadPool, + PageCacheRecycler pageCacheRecycler, + CircuitBreakerService circuitBreakerService, + NamedWriteableRegistry namedWriteableRegistry, + NetworkService networkService + ) { + this.settings = settings; + this.profileSettings = getProfileSettings(Settings.builder().put("transport.profiles.test.port", "5555").put("transport.profiles.default.port", "3333").build()); + this.version = version; + this.threadPool = threadPool; + this.pageCacheRecycler = pageCacheRecycler; + this.circuitBreakerService = circuitBreakerService; + this.networkService = networkService; + String nodeName = Node.NODE_NAME_SETTING.get(settings); + final Settings defaultFeatures = TransportSettings.DEFAULT_FEATURES_SETTING.get(settings); + + String[] features; + if (defaultFeatures == null) { + features = new String[0]; + } else { + defaultFeatures.names().forEach(key -> { + if (Booleans.parseBoolean(defaultFeatures.get(key)) == false) { + throw new IllegalArgumentException("feature settings must have default [true] value"); + } + }); + // use a sorted set to present the features 
in a consistent order + features = new TreeSet<>(defaultFeatures.names()).toArray(new String[defaultFeatures.names().size()]); + } + BigArrays bigArrays = new BigArrays(pageCacheRecycler, circuitBreakerService, CircuitBreaker.IN_FLIGHT_REQUESTS); + this.outboundHandler = new OutboundHandler(nodeName, version, features, statsTracker, threadPool, bigArrays); + this.handshaker = new TransportHandshaker( + version, + threadPool, + (node, channel, requestId, v) -> outboundHandler.sendRequest( + node, + channel, + requestId, + TransportHandshaker.HANDSHAKE_ACTION_NAME, + new TransportHandshaker.HandshakeRequest(version), + TransportRequestOptions.EMPTY, + v, + false, + true + ) + ); + this.keepAlive = new TransportKeepAlive(threadPool, this.outboundHandler::sendBytes); + this.inboundHandler = new InboundHandler( + threadPool, + outboundHandler, + namedWriteableRegistry, + handshaker, + keepAlive, + requestHandlers, + responseHandlers + ); + } + + public final int getNumPendingHandshakes() { + return this.handshaker.getNumPendingHandshakes(); + } + + public StatsTracker getStatsTracker() { + return statsTracker; + } + + public Supplier getInflightBreaker() { + return () -> circuitBreakerService.getBreaker(CircuitBreaker.IN_FLIGHT_REQUESTS); + } + + @Override + protected void doStart() { + + } + + protected abstract void stopInternal(); + + public void inboundMessage(TcpChannel channel, InboundMessage message) { + try { + inboundHandler.inboundMessage(channel, message); + } catch (Exception e) { + onException(channel, e); + } + } + + public final long getNumHandshakes() { + return handshaker.getNumHandshakes(); + } + public final TransportKeepAlive getKeepAlive() { + return keepAlive; + } + + + /** + * Validates the first 6 bytes of the message header and returns the length of the message. If 6 bytes + * are not available, it returns -1. 
+ * + * @param networkBytes the will be read + * @return the length of the message + * @throws StreamCorruptedException if the message header format is not recognized + * @throws org.opensearch.transport.TcpTransport.HttpRequestOnTransportException if the message header appears to be an HTTP message + * @throws IllegalArgumentException if the message length is greater that the maximum allowed frame size. + * This is dependent on the available memory. + */ + + public static int readMessageLength(BytesReference networkBytes) throws IOException { + if (networkBytes.length() < BYTES_NEEDED_FOR_MESSAGE_SIZE) { + return -1; + } else { + return readHeaderBuffer(networkBytes); + } + } + + private static int readHeaderBuffer(BytesReference headerBuffer) throws IOException { + if (headerBuffer.get(0) != 'E' || headerBuffer.get(1) != 'S') { + if (appearsToBeHTTPRequest(headerBuffer)) { + throw new TcpTransport.HttpRequestOnTransportException("This is not an HTTP port"); + } + + if (appearsToBeHTTPResponse(headerBuffer)) { + throw new StreamCorruptedException( + "received HTTP response on transport port, ensure that transport port (not " + + "HTTP port) of a remote node is specified in the configuration" + ); + } + + String firstBytes = "(" + + Integer.toHexString(headerBuffer.get(0) & 0xFF) + + "," + + Integer.toHexString(headerBuffer.get(1) & 0xFF) + + "," + + Integer.toHexString(headerBuffer.get(2) & 0xFF) + + "," + + Integer.toHexString(headerBuffer.get(3) & 0xFF) + + ")"; + + if (appearsToBeTLS(headerBuffer)) { + throw new StreamCorruptedException("SSL/TLS request received but SSL/TLS is not enabled on this node, got " + firstBytes); + } + + throw new StreamCorruptedException("invalid internal transport message format, got " + firstBytes); + } + final int messageLength = headerBuffer.getInt(TcpHeader.MARKER_BYTES_SIZE); + + if (messageLength == TransportKeepAlive.PING_DATA_SIZE) { + // This is a ping + return 0; + } + + if (messageLength <= 0) { + throw new 
StreamCorruptedException("invalid data length: " + messageLength); + } + + if (messageLength > THIRTY_PER_HEAP_SIZE) { + throw new IllegalArgumentException( + "transport content length received [" + + new ByteSizeValue(messageLength) + + "] exceeded [" + + new ByteSizeValue(THIRTY_PER_HEAP_SIZE) + + "]" + ); + } + + return messageLength; + } + + private static boolean appearsToBeHTTPRequest(BytesReference headerBuffer) { + return bufferStartsWith(headerBuffer, "GET") + || bufferStartsWith(headerBuffer, "POST") + || bufferStartsWith(headerBuffer, "PUT") + || bufferStartsWith(headerBuffer, "HEAD") + || bufferStartsWith(headerBuffer, "DELETE") + // Actually 'OPTIONS'. But we are only guaranteed to have read six bytes at this point. + || bufferStartsWith(headerBuffer, "OPTION") + || bufferStartsWith(headerBuffer, "PATCH") + || bufferStartsWith(headerBuffer, "TRACE"); + } + + private static boolean appearsToBeHTTPResponse(BytesReference headerBuffer) { + return bufferStartsWith(headerBuffer, "HTTP"); + } + + private static boolean appearsToBeTLS(BytesReference headerBuffer) { + return headerBuffer.get(0) == 0x16 && headerBuffer.get(1) == 0x03; + } + + private static boolean bufferStartsWith(BytesReference buffer, String method) { + char[] chars = method.toCharArray(); + for (int i = 0; i < chars.length; i++) { + if (buffer.get(i) != chars[i]) { + return false; + } + } + return true; + } + + /** + * A helper exception to mark an incoming connection as potentially being HTTP + * so an appropriate error code can be returned + */ + public static class HttpRequestOnTransportException extends OpenSearchException { + + HttpRequestOnTransportException(String msg) { + super(msg); + } + + @Override + public RestStatus status() { + return RestStatus.BAD_REQUEST; + } + + public HttpRequestOnTransportException(StreamInput in) throws IOException { + super(in); + } + } + + + + @Override + protected void doStop() { + final CountDownLatch latch = new CountDownLatch(1); + // make sure we 
run it on another thread than a possible IO handler thread + assert threadPool.generic().isShutdown() == false : "Must stop transport before terminating underlying threadpool"; + threadPool.generic().execute(() -> { + closeLock.writeLock().lock(); + try { + keepAlive.close(); + + // first stop to accept any incoming connections so nobody can connect to this transport + for (Map.Entry> entry : serverChannels.entrySet()) { + String profile = entry.getKey(); + List channels = entry.getValue(); + ActionListener closeFailLogger = ActionListener.wrap( + c -> {}, + e -> logger.warn(() -> new ParameterizedMessage("Error closing serverChannel for profile [{}]", profile), e) + ); + channels.forEach(c -> c.addCloseListener(closeFailLogger)); + CloseableChannel.closeChannels(channels, true); + } + serverChannels.clear(); + + // close all of the incoming channels. The closeChannels method takes a list so we must convert the set. + CloseableChannel.closeChannels(new ArrayList<>(acceptedChannels), true); + acceptedChannels.clear(); + + stopInternal(); + } finally { + closeLock.writeLock().unlock(); + latch.countDown(); + } + }); + + try { + latch.await(30, TimeUnit.SECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + // ignore + } + } + + @Override + protected void doClose() throws IOException { + + } + + @Override + public void setMessageListener(transportservice.transport.TransportMessageListener transportMessageListener) { + outboundHandler.setMessageListener(transportMessageListener); + inboundHandler.setMessageListener(transportMessageListener); + } + + @Override + public BoundTransportAddress boundAddress() { + return this.boundAddress; + } + + @Override + public Map profileBoundAddresses() { + return unmodifiableMap(new HashMap<>(profileBoundAddresses)); + } + + @Override + public TransportAddress[] addressesFromString(String s) throws UnknownHostException { + return new TransportAddress[0]; + } + + @Override + public List 
getDefaultSeedAddresses() { + return null; + } + + @Override + public void openConnection(DiscoveryNode discoveryNode, ConnectionProfile connectionProfile, transportservice.action.ActionListener actionListener) { + + } + + @Override + public TransportStats getStats() { + final MeanMetric writeBytesMetric = statsTracker.getWriteBytes(); + final long bytesWritten = statsTracker.getBytesWritten(); + final long messagesSent = statsTracker.getMessagesSent(); + final long messagesReceived = statsTracker.getMessagesReceived(); + final long bytesRead = statsTracker.getBytesRead(); + return new TransportStats( + acceptedChannels.size(), + outboundConnectionCount.get(), + messagesReceived, + bytesRead, + messagesSent, + bytesWritten + ); + } + + @Override + public ResponseHandlers getResponseHandlers() { + return responseHandlers; + } + + @Override + public RequestHandlers getRequestHandlers() { + return requestHandlers; + } + + private InetSocketAddress bindToPort(final String name, final InetAddress hostAddress, String port) { + PortsRange portsRange = new PortsRange(port); + final AtomicReference lastException = new AtomicReference<>(); + final AtomicReference boundSocket = new AtomicReference<>(); + closeLock.writeLock().lock(); + try { + // No need for locking here since Lifecycle objects can't move from STARTED to INITIALIZED + if (lifecycle.initialized() == false && lifecycle.started() == false) { + throw new IllegalStateException("transport has been stopped"); + } + boolean success = portsRange.iterate(portNumber -> { + try { + TcpServerChannel channel = bind(name, new InetSocketAddress(hostAddress, portNumber)); + serverChannels.computeIfAbsent(name, k -> new ArrayList<>()).add(channel); + boundSocket.set(channel.getLocalAddress()); + } catch (Exception e) { + lastException.set(e); + return false; + } + return true; + }); + if (!success) { + throw new BindTransportException( + "Failed to bind to " + NetworkAddress.format(hostAddress, portsRange), + 
lastException.get() + ); + } + } finally { + closeLock.writeLock().unlock(); + } + if (logger.isDebugEnabled()) { + logger.debug("Bound profile [{}] to address {{}}", name, NetworkAddress.format(boundSocket.get())); + } + + return boundSocket.get(); + } + + protected void bindServer(ProfileSettings profileSettings) { + System.out.println("INSIDE BIND SERVER"); + // Bind and start to accept incoming connections. + InetAddress[] hostAddresses; + List profileBindHosts = profileSettings.bindHosts; + try { + hostAddresses = networkService.resolveBindHostAddresses(profileBindHosts.toArray(Strings.EMPTY_ARRAY)); + } catch (IOException e) { + throw new BindTransportException("Failed to resolve host " + profileBindHosts, e); + } + if (logger.isDebugEnabled()) { + String[] addresses = new String[hostAddresses.length]; + for (int i = 0; i < hostAddresses.length; i++) { + addresses[i] = NetworkAddress.format(hostAddresses[i]); + } + logger.debug("binding server bootstrap to: {}", (Object) addresses); + } + + assert hostAddresses.length > 0; + + List boundAddresses = new ArrayList<>(); + for (InetAddress hostAddress : hostAddresses) { + boundAddresses.add(bindToPort(profileSettings.profileName, hostAddress, profileSettings.portOrRange)); + } + + final BoundTransportAddress boundTransportAddress = createBoundTransportAddress(profileSettings, boundAddresses); + + if (profileSettings.isDefaultProfile) { + this.boundAddress = boundTransportAddress; + } else { + profileBoundAddresses.put(profileSettings.profileName, boundTransportAddress); + } + } + + protected void onServerException(TcpServerChannel channel, Exception e) { + if (e instanceof BindException) { + logger.debug(() -> new ParameterizedMessage("bind exception from server channel caught on transport layer [{}]", channel), e); + } else { + logger.error(new ParameterizedMessage("exception from server channel caught on transport layer [{}]", channel), e); + } + } + + protected void serverAcceptedChannel(TcpChannel channel) { 
+ boolean addedOnThisCall = acceptedChannels.add(channel); + assert addedOnThisCall : "Channel should only be added to accepted channel set once"; + // Mark the channel init time +// channel.getChannelStats().markAccessed(threadPool.relativeTimeInMillis()); + channel.addCloseListener(ActionListener.wrap(() -> acceptedChannels.remove(channel))); + logger.trace(() -> new ParameterizedMessage("Tcp transport channel accepted: {}", channel)); + } + + /** + * Binds to the given {@link InetSocketAddress} + * + * @param name the profile name + * @param address the address to bind to + */ + protected abstract TcpServerChannel bind(String name, InetSocketAddress address) throws IOException; + + + private BoundTransportAddress createBoundTransportAddress(ProfileSettings profileSettings, List boundAddresses) { + String[] boundAddressesHostStrings = new String[boundAddresses.size()]; + TransportAddress[] transportBoundAddresses = new TransportAddress[boundAddresses.size()]; + for (int i = 0; i < boundAddresses.size(); i++) { + InetSocketAddress boundAddress = boundAddresses.get(i); + boundAddressesHostStrings[i] = boundAddress.getHostString(); + transportBoundAddresses[i] = new TransportAddress(boundAddress); + } + + List publishHosts = profileSettings.publishHosts; + if (profileSettings.isDefaultProfile == false && publishHosts.isEmpty()) { + publishHosts = Arrays.asList(boundAddressesHostStrings); + } + if (publishHosts.isEmpty()) { + publishHosts = NetworkService.GLOBAL_NETWORK_PUBLISH_HOST_SETTING.get(settings); + } + + final InetAddress publishInetAddress; + try { + publishInetAddress = networkService.resolvePublishHostAddresses(publishHosts.toArray(Strings.EMPTY_ARRAY)); + } catch (Exception e) { + throw new BindTransportException("Failed to resolve publish address", e); + } + + final int publishPort = resolvePublishPort(profileSettings, boundAddresses, publishInetAddress); + final TransportAddress publishAddress = new TransportAddress(new 
InetSocketAddress(publishInetAddress, publishPort)); + return new BoundTransportAddress(transportBoundAddresses, publishAddress); + } + + // package private for tests + static int resolvePublishPort(ProfileSettings profileSettings, List boundAddresses, InetAddress publishInetAddress) { + int publishPort = profileSettings.publishPort; + + // if port not explicitly provided, search for port of address in boundAddresses that matches publishInetAddress + if (publishPort < 0) { + for (InetSocketAddress boundAddress : boundAddresses) { + InetAddress boundInetAddress = boundAddress.getAddress(); + if (boundInetAddress.isAnyLocalAddress() || boundInetAddress.equals(publishInetAddress)) { + publishPort = boundAddress.getPort(); + break; + } + } + } + + // if no matching boundAddress found, check if there is a unique port for all bound addresses + if (publishPort < 0) { + final IntSet ports = new IntHashSet(); + for (InetSocketAddress boundAddress : boundAddresses) { + ports.add(boundAddress.getPort()); + } + if (ports.size() == 1) { + publishPort = ports.iterator().next().value; + } + } + + if (publishPort < 0) { + String profileExplanation = profileSettings.isDefaultProfile ? "" : " for profile " + profileSettings.profileName; + throw new BindTransportException( + "Failed to auto-resolve publish port" + + profileExplanation + + ", multiple bound addresses " + + boundAddresses + + " with distinct ports and none of them matched the publish address (" + + publishInetAddress + + "). 
" + + "Please specify a unique port by setting " + + TransportSettings.PORT.getKey() + + " or " + + TransportSettings.PUBLISH_PORT.getKey() + ); + } + return publishPort; + } + + /** + * Returns all profile settings for the given settings object + */ + public static Set getProfileSettings(Settings settings) { + HashSet profiles = new HashSet<>(); + boolean isDefaultSet = false; + for (String profile : settings.getGroups("transport.profiles.", true).keySet()) { + profiles.add(new ProfileSettings(settings, profile)); + if (TransportSettings.DEFAULT_PROFILE.equals(profile)) { + isDefaultSet = true; + } + } + if (isDefaultSet == false) { + profiles.add(new ProfileSettings(settings, TransportSettings.DEFAULT_PROFILE)); + } + return Collections.unmodifiableSet(profiles); + } + + public ThreadPool getThreadPool() { + return threadPool; + } + + public Version getVersion() { + return version; + } + + public void onException(TcpChannel channel, Exception e) { + handleException(channel, e, lifecycle); + } + + // exposed for tests + static void handleException(TcpChannel channel, Exception e, Lifecycle lifecycle) { + if (!lifecycle.started()) { + // just close and ignore - we are already stopped and just need to make sure we release all resources + CloseableChannel.closeChannel(channel); + return; + } + + if (isCloseConnectionException(e)) { + logger.debug( + () -> new ParameterizedMessage( + "close connection exception caught on transport layer [{}], disconnecting from relevant node", + channel + ), + e + ); + // close the channel, which will cause a node to be disconnected if relevant + CloseableChannel.closeChannel(channel); + } else if (isConnectException(e)) { + logger.debug(() -> new ParameterizedMessage("connect exception caught on transport layer [{}]", channel), e); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + CloseableChannel.closeChannel(channel); + } else if (e instanceof BindException) { + logger.debug(() -> new 
ParameterizedMessage("bind exception caught on transport layer [{}]", channel), e); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + CloseableChannel.closeChannel(channel); + } else if (e instanceof CancelledKeyException) { + logger.debug( + () -> new ParameterizedMessage( + "cancelled key exception caught on transport layer [{}], disconnecting from relevant node", + channel + ), + e + ); + // close the channel as safe measure, which will cause a node to be disconnected if relevant + CloseableChannel.closeChannel(channel); + } else if (e instanceof org.opensearch.transport.TcpTransport.HttpRequestOnTransportException) { + // in case we are able to return data, serialize the exception content and sent it back to the client + if (channel.isOpen()) { + BytesArray message = new BytesArray(e.getMessage().getBytes(StandardCharsets.UTF_8)); + + } + } else if (e instanceof StreamCorruptedException) { + logger.warn(() -> new ParameterizedMessage("{}, [{}], closing connection", e.getMessage(), channel)); + CloseableChannel.closeChannel(channel); + } else { + logger.warn(() -> new ParameterizedMessage("exception caught on transport layer [{}], closing connection", channel), e); + // close the channel, which will cause a node to be disconnected if relevant + CloseableChannel.closeChannel(channel); + } + } + + + /** + * Representation of a transport profile settings for a {@code transport.profiles.$profilename.*} + */ + public static final class ProfileSettings { + public final String profileName; + public final boolean tcpNoDelay; + public final boolean tcpKeepAlive; + public final int tcpKeepIdle; + public final int tcpKeepInterval; + public final int tcpKeepCount; + public final boolean reuseAddress; + public final ByteSizeValue sendBufferSize; + public final ByteSizeValue receiveBufferSize; + public final List bindHosts; + public final List publishHosts; + public final String portOrRange; + public final int publishPort; + 
public final boolean isDefaultProfile; + + public ProfileSettings(Settings settings, String profileName) { + this.profileName = profileName; + isDefaultProfile = TransportSettings.DEFAULT_PROFILE.equals(profileName); + tcpKeepAlive = TransportSettings.TCP_KEEP_ALIVE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + tcpKeepIdle = TransportSettings.TCP_KEEP_IDLE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + tcpKeepInterval = TransportSettings.TCP_KEEP_INTERVAL_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + tcpKeepCount = TransportSettings.TCP_KEEP_COUNT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + tcpNoDelay = TransportSettings.TCP_NO_DELAY_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + reuseAddress = TransportSettings.TCP_REUSE_ADDRESS_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + sendBufferSize = TransportSettings.TCP_SEND_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + receiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + List profileBindHosts = TransportSettings.BIND_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + bindHosts = (profileBindHosts.isEmpty() ? NetworkService.GLOBAL_NETWORK_BIND_HOST_SETTING.get(settings) : profileBindHosts); + publishHosts = TransportSettings.PUBLISH_HOST_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + Setting concretePort = TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(profileName); + if (concretePort.exists(settings) == false && isDefaultProfile == false) { + throw new IllegalStateException("profile [" + profileName + "] has no port configured"); + } + portOrRange = TransportSettings.PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + publishPort = isDefaultProfile + ? 
TransportSettings.PUBLISH_PORT.get(settings) + : TransportSettings.PUBLISH_PORT_PROFILE.getConcreteSettingForNamespace(profileName).get(settings); + } + } + + + +} diff --git a/src/main/java/TestRequest.java b/src/main/java/transportservice/TestRequest.java similarity index 96% rename from src/main/java/TestRequest.java rename to src/main/java/transportservice/TestRequest.java index 741ca86..ad4a72f 100644 --- a/src/main/java/TestRequest.java +++ b/src/main/java/transportservice/TestRequest.java @@ -1,4 +1,4 @@ -// +package transportservice;// // Source code recreated from a .class file by IntelliJ IDEA // (powered by FernFlower decompiler) // diff --git a/src/main/java/TestThreadPool.java b/src/main/java/transportservice/TestThreadPool.java similarity index 99% rename from src/main/java/TestThreadPool.java rename to src/main/java/transportservice/TestThreadPool.java index 8e06b5c..d3772ce 100644 --- a/src/main/java/TestThreadPool.java +++ b/src/main/java/transportservice/TestThreadPool.java @@ -1,4 +1,4 @@ -// +package transportservice;// // Source code recreated from a .class file by IntelliJ IDEA // (powered by FernFlower decompiler) // diff --git a/src/main/java/transportservice/TransportInterceptor.java b/src/main/java/transportservice/TransportInterceptor.java new file mode 100644 index 0000000..cd7d6cc --- /dev/null +++ b/src/main/java/transportservice/TransportInterceptor.java @@ -0,0 +1,82 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.Writeable.Reader; +import org.opensearch.transport.*; + +/** + * This interface allows plugins to intercept requests on both the sender and the receiver side. + */ +public interface TransportInterceptor { + /** + * This is called for each handler that is registered via + * {@link TransportService#registerRequestHandler(String, String, boolean, boolean, Reader, TransportRequestHandler)} or + * {@link TransportService#registerRequestHandler(String, String, Reader, TransportRequestHandler)}. The returned handler is + * used instead of the passed in handler. By default the provided handler is returned. + */ + default TransportRequestHandler interceptHandler( + String action, + String executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + return actualHandler; + } + + /** + * This is called up-front providing the actual low level {@link AsyncSender} that performs the low level send request. 
+ * The returned sender is used to send all requests that come in via + * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportResponseHandler)} or + * {@link TransportService#sendRequest(DiscoveryNode, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)}. + * This allows plugins to perform actions on each send request including modifying the request context etc. + */ + default AsyncSender interceptSender(AsyncSender sender) { + return sender; + } + + /** + * A simple interface to decorate + * {@link #sendRequest(Transport.Connection, String, TransportRequest, TransportRequestOptions, TransportResponseHandler)} + */ + interface AsyncSender { + void sendRequest( + transportservice.transport.Transport.Connection connection, + String action, + TransportRequest request, + TransportRequestOptions options, + TransportResponseHandler handler + ); + } +} diff --git a/src/main/java/transportservice/TransportService.java b/src/main/java/transportservice/TransportService.java new file mode 100644 index 0000000..b19eed3 --- /dev/null +++ b/src/main/java/transportservice/TransportService.java @@ -0,0 +1,917 @@ +package transportservice; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; +import org.opensearch.cluster.ClusterName; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.logging.Loggers; +import org.opensearch.common.regex.Regex; +import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.core.internal.io.IOUtils; 
+import org.opensearch.node.NodeClosedException; +import org.opensearch.node.ReportingService; +import org.opensearch.threadpool.Scheduler; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.*; +import transportservice.action.ActionListenerResponseHandler; +import transportservice.action.PlainActionFuture; +import transportservice.component.AbstractLifecycleComponent; +import transportservice.transport.ConnectionProfile; +import transportservice.transport.TransportConnectionListener; +import transportservice.transport.TransportMessageListener; +import transportservice.transport.Transport; +import transportservice.action.ActionListener; +import transportservice.transport.ConnectionManager; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.*; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class TransportService extends AbstractLifecycleComponent + implements + ReportingService, + TransportMessageListener, + TransportConnectionListener { + + protected final Transport transport; + protected final ConnectionManager connectionManager; + private static final Logger logger = LogManager.getLogger(TransportService.class); + private final Transport.ResponseHandlers responseHandlers; + protected final ThreadPool threadPool; + private final Function localNodeFactory; + private final DelegatingTransportMessageListener messageListener = new DelegatingTransportMessageListener(); + volatile DiscoveryNode localNode = null; + public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; + private final TransportInterceptor.AsyncSender asyncSender; + private final TransportInterceptor interceptor; + private final Logger tracerLog; + volatile String[] tracerLogInclude; + volatile 
String[] tracerLogExclude; + private final AtomicBoolean handleIncomingRequests = new AtomicBoolean(); + public static final String DIRECT_RESPONSE_PROFILE = ".direct"; + final Map timeoutInfoHandlers = Collections.synchronizedMap( + new LinkedHashMap(100, .75F, true) { + @Override + protected boolean removeEldestEntry(Map.Entry eldest) { + return size() > 100; + } + } + ); + + public static final TransportInterceptor NOOP_TRANSPORT_INTERCEPTOR = new TransportInterceptor() { + }; + + private final Transport.Connection localNodeConnection = new Transport.Connection() { + @Override + public DiscoveryNode getNode() { + return localNode; + } + + @Override + public void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) + throws TransportException { + sendLocalRequest(requestId, action, request, options); + } + + @Override + public void addCloseListener(ActionListener listener) {} + + @Override + public boolean isClosed() { + return false; + } + + @Override + public void close() {} + }; + + + public TransportService(Transport transport, ConnectionManager connectionManager, Transport.ResponseHandlers responseHandlers, ThreadPool threadPool, Function localNodeFactory, TransportInterceptor transportInterceptor) { + this.transport = transport; + this.connectionManager = connectionManager; + this.responseHandlers = responseHandlers; + this.threadPool = threadPool; + this.localNodeFactory = localNodeFactory; + this.interceptor = transportInterceptor; + this.asyncSender = interceptor.interceptSender(this::sendRequestInternal); + tracerLog = Loggers.getLogger(logger, ".tracer"); + } + + @Override + protected void doStart() { + transport.setMessageListener(this); + connectionManager.addListener(this); + transport.start(); + if (transport.boundAddress() != null) { + logger.info("{}", transport.boundAddress()); + for (Map.Entry entry : transport.profileBoundAddresses().entrySet()) { + logger.info("profile [{}]: {}", 
entry.getKey(), entry.getValue()); + } + } + + // localNode = localNodeFactory.apply(transport.boundAddress()); + + } + + public void addConnectionListener(transportservice.transport.TransportConnectionListener listener) { + connectionManager.addListener(listener); + } + + private ExecutorService getExecutorService() { + return threadPool.generic(); + } + + @Override + protected void doStop() { + try { + IOUtils.close(connectionManager, null, transport::stop); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + // in case the transport is not connected to our local node (thus cleaned on node disconnect) + // make sure to clean any leftover on going handles + for (final Transport.ResponseContext holderToNotify : responseHandlers.prune(h -> true)) { + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows + getExecutorService().execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + holderToNotify.action() + ), + e + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + holderToNotify.action() + ), + e + ); + } + + @Override + public void doRun() { + TransportException ex = new SendRequestTransportException( + holderToNotify.connection().getNode(), + holderToNotify.action(), + null + ); + holderToNotify.handler().handleException(ex); + } + }); + } + } + } + + // Connect to Node + + private boolean isLocalNode(DiscoveryNode discoveryNode) { + return Objects.requireNonNull(discoveryNode, "discovery node must not be null").equals(localNode); + } + + /** + * Connect to the specified node with the default connection profile + * + * 
@param node the node to connect to + */ + public void connectToNode(DiscoveryNode node) throws ConnectTransportException { + connectToNode(node, null); + } + + public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile) { + PlainActionFuture.get(fut -> connectToNode(node, connectionProfile, ActionListener.map(fut, x -> null))); + } + + public void connectToNode(final DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener listener) { + if (isLocalNode(node)) { + listener.onResponse(null); + return; + } + connectionManager.connectToNode(node, connectionProfile, connectionValidator(node), listener); + } + + public ConnectionManager.ConnectionValidator connectionValidator(DiscoveryNode node) { + return (newConnection, actualProfile, listener) -> { + // We don't validate cluster names to allow for CCS connections. + handshake(newConnection, actualProfile.getHandshakeTimeout().millis(), cn -> true, ActionListener.map(listener, resp -> { + final DiscoveryNode remote = resp.discoveryNode; + if (node.equals(remote) == false) { + throw new ConnectTransportException(node, "handshake failed. unexpected remote node " + remote); + } + return null; + })); + }; + } + + + /** + * Establishes a new connection to the given node. The connection is NOT maintained by this service, it's the callers + * responsibility to close the connection once it goes out of scope. + * The ActionListener will be called on the calling thread or the generic thread pool. 
+ * @param node the node to connect to + * @param connectionProfile the connection profile to use + * @param listener the action listener to notify + */ + public void openConnection( + final DiscoveryNode node, + transportservice.transport.ConnectionProfile connectionProfile, + ActionListener listener + ) { + if (isLocalNode(node)) { + listener.onResponse(localNodeConnection); + } else { + connectionManager.openConnection(node, connectionProfile, listener); + } + } + + + public void handshake( + final Transport.Connection connection, + final long handshakeTimeout, + Predicate clusterNamePredicate, + final ActionListener listener + ) { + final DiscoveryNode node = connection.getNode(); + sendRequest( + connection, + HANDSHAKE_ACTION_NAME, + HandshakeRequest.INSTANCE, + TransportRequestOptions.builder().withTimeout(handshakeTimeout).build(), + new ActionListenerResponseHandler<>(new ActionListener() { + @Override + public void onResponse(HandshakeResponse response) { + if (clusterNamePredicate.test(response.clusterName) == false) { + listener.onFailure( + new IllegalStateException( + "handshake with [" + + node + + "] failed: remote cluster name [" + + response.clusterName.value() + + "] does not match " + + clusterNamePredicate + ) + ); + } else if (response.version.isCompatible(localNode.getVersion()) == false) { + listener.onFailure( + new IllegalStateException( + "handshake with [" + + node + + "] failed: remote node version [" + + response.version + + "] is incompatible with local node version [" + + localNode.getVersion() + + "]" + ) + ); + } else { + listener.onResponse(response); + } + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }, HandshakeResponse::new, ThreadPool.Names.GENERIC) + ); + } + + public final void sendRequest( + final Transport.Connection connection, + final String action, + final TransportRequest request, + final TransportRequestOptions options, + final TransportResponseHandler handler + ) { + try { + 
final TransportResponseHandler delegate; + if (request.getParentTask().isSet()) { + // TODO: capture the connection instead so that we can cancel child tasks on the remote connections. + // final Releasable unregisterChildNode = taskManager.registerChildNode(request.getParentTask().getId(), connection.getNode()); + delegate = new TransportResponseHandler() { + @Override + public void handleResponse(T response) { + // unregisterChildNode.close(); + handler.handleResponse(response); + } + + @Override + public void handleException(TransportException exp) { + // unregisterChildNode.close(); + handler.handleException(exp); + } + + @Override + public String executor() { + return handler.executor(); + } + + @Override + public T read(StreamInput in) throws IOException { + return handler.read(in); + } + + @Override + public String toString() { + return getClass().getName() + "/[" + action + "]:" + handler.toString(); + } + }; + } else { + delegate = handler; + } + asyncSender.sendRequest(connection, action, request, options, delegate); + } catch (final Exception ex) { + // the caller might not handle this so we invoke the handler + final TransportException te; + if (ex instanceof TransportException) { + te = (TransportException) ex; + } else { + te = new TransportException("failure to send", ex); + } + handler.handleException(te); + } + } + + // Async sender + + + private void sendRequestInternal( + final Transport.Connection connection, + final String action, + final TransportRequest request, + final TransportRequestOptions options, + TransportResponseHandler handler + ) { + if (connection == null) { + throw new IllegalStateException("can't send request to a null connection"); + } + DiscoveryNode node = connection.getNode(); + + Supplier storedContextSupplier = threadPool.getThreadContext().newRestorableContext(true); + ContextRestoreResponseHandler responseHandler = new ContextRestoreResponseHandler<>(storedContextSupplier, handler); + // TODO we can probably fold this 
entire request ID dance into connection.sendReqeust but it will be a bigger refactoring + // DUMMY VALUE + final long requestId = 256; + final TransportService.TimeoutHandler timeoutHandler; + if (options.timeout() != null) { + timeoutHandler = new TimeoutHandler(requestId, connection.getNode(), action); + responseHandler.setTimeoutHandler(timeoutHandler); + } else { + timeoutHandler = null; + } + try { + if (lifecycle.stoppedOrClosed()) { + /* + * If we are not started the exception handling will remove the request holder again and calls the handler to notify the + * caller. It will only notify if toStop hasn't done the work yet. + */ + throw new NodeClosedException(localNode); + } + if (timeoutHandler != null) { + assert options.timeout() != null; + timeoutHandler.scheduleTimeout(options.timeout()); + } + connection.sendRequest(requestId, action, request, options); // local node optimization happens upstream + } catch (final Exception e) { + // usually happen either because we failed to connect to the node + // or because we failed serializing the message + final Transport.ResponseContext contextToNotify = responseHandlers.remove(requestId); + // If holderToNotify == null then handler has already been taken care of. + if (contextToNotify != null) { + if (timeoutHandler != null) { + timeoutHandler.cancel(); + } + // callback that an exception happened, but on a different thread since we don't + // want handlers to worry about stack overflows. In the special case of running into a closing node we run on the current + // thread on a best effort basis though. + final SendRequestTransportException sendRequestException = new SendRequestTransportException(node, action, e); + final String executor = lifecycle.stoppedOrClosed() ? 
ThreadPool.Names.SAME : ThreadPool.Names.GENERIC; + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + public void onRejection(Exception e) { + // if we get rejected during node shutdown we don't wanna bubble it up + logger.debug( + () -> new ParameterizedMessage( + "failed to notify response handler on rejection, action: {}", + contextToNotify.action() + ), + e + ); + } + + @Override + public void onFailure(Exception e) { + logger.warn( + () -> new ParameterizedMessage( + "failed to notify response handler on exception, action: {}", + contextToNotify.action() + ), + e + ); + } + + @Override + protected void doRun() throws Exception { + contextToNotify.handler().handleException(sendRequestException); + } + }); + } else { + logger.debug("Exception while sending request, handler likely already notified due to timeout", e); + } + } + } + + public RequestHandlerRegistry getRequestHandler(String action) { + return transport.getRequestHandlers().getHandler(action); + } + + private void sendLocalRequest(long requestId, final String action, final TransportRequest request, TransportRequestOptions options) { + final DirectResponseChannel channel = new DirectResponseChannel(localNode, action, requestId, this, threadPool); + try { + onRequestSent(localNode, requestId, action, request, options); + onRequestReceived(requestId, action); + final RequestHandlerRegistry reg = getRequestHandler(action); + if (reg == null) { + throw new ActionNotFoundTransportException("Action [" + action + "] not found"); + } + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + // noinspection unchecked + reg.processMessageReceived(request, channel); + } else { + threadPool.executor(executor).execute(new AbstractRunnable() { + @Override + protected void doRun() throws Exception { + // noinspection unchecked + reg.processMessageReceived(request, channel); + } + + @Override + public boolean isForceExecution() { + return 
reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn( + () -> new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), + inner + ); + } + } + + @Override + public String toString() { + return "processing of [" + requestId + "][" + action + "]: " + request; + } + }); + } + + } catch (Exception e) { + try { + channel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("failed to notify channel of error message for action [{}]", action), inner); + } + } + } + + + + //Ends here + + + @Override + protected void doClose() throws IOException { + + } + + /** + * start accepting incoming requests. + * when the transport layer starts up it will block any incoming requests until + * this method is called + */ + public final void acceptIncomingRequests() { + handleIncomingRequests.set(true); + } + + private boolean shouldTraceAction(String action) { + return shouldTraceAction(action, tracerLogInclude, tracerLogExclude); + } + + public static boolean shouldTraceAction(String action, String[] include, String[] exclude) { + if (include.length > 0) { + if (Regex.simpleMatch(include, action) == false) { + return false; + } + } + if (exclude.length > 0) { + return !Regex.simpleMatch(exclude, action); + } + return true; + } + + + /** + * called by the {@link Transport} implementation when an incoming request arrives but before + * any parsing of it has happened (with the exception of the requestId and action) + */ + @Override + public void onRequestReceived(long requestId, String action) { + if (handleIncomingRequests.get() == false) { + throw new IllegalStateException("transport not ready yet to handle incoming requests"); + } + if (tracerLog.isTraceEnabled() && shouldTraceAction(action)) { + tracerLog.trace("[{}][{}] received request", requestId, 
action); + } + messageListener.onRequestReceived(requestId, action); + } + + @Override + public TransportInfo info() { + return null; + } + + public static class HandshakeResponse extends TransportResponse { + private final DiscoveryNode discoveryNode; + private final ClusterName clusterName; + private final Version version; + + public HandshakeResponse(DiscoveryNode discoveryNode, ClusterName clusterName, Version version) { + this.discoveryNode = discoveryNode; + this.version = version; + this.clusterName = clusterName; + } + + public HandshakeResponse(StreamInput in) throws IOException { + super(in); + discoveryNode = in.readOptionalWriteable(DiscoveryNode::new); + clusterName = new ClusterName(in); + Version tmpVersion = Version.readVersion(in); + if (in.getVersion().onOrBefore(LegacyESVersion.V_7_10_2)) { + tmpVersion = LegacyESVersion.V_7_10_2; + } + version = tmpVersion; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeOptionalWriteable(discoveryNode); + clusterName.writeTo(out); + if (out.getVersion().before(Version.V_1_0_0)) { + Version.writeVersion(LegacyESVersion.V_7_10_2, out); + } else { + Version.writeVersion(version, out); + } + } + + public DiscoveryNode getDiscoveryNode() { + return discoveryNode; + } + + public ClusterName getClusterName() { + return clusterName; + } + } + + static class HandshakeRequest extends TransportRequest { + + public static final HandshakeRequest INSTANCE = new HandshakeRequest(); + + HandshakeRequest(StreamInput in) throws IOException { + super(in); + } + + private HandshakeRequest() {} + + } + + final class TimeoutHandler implements Runnable { + + private final long requestId; + private final long sentTime = threadPool.relativeTimeInMillis(); + private final String action; + private final DiscoveryNode node; + volatile Scheduler.Cancellable cancellable; + + TimeoutHandler(long requestId, DiscoveryNode node, String action) { + this.requestId = requestId; + this.node = node; + 
this.action = action; + } + + @Override + public void run() { + if (responseHandlers.contains(requestId)) { + long timeoutTime = threadPool.relativeTimeInMillis(); + timeoutInfoHandlers.put(requestId, new TimeoutInfoHolder(node, action, sentTime, timeoutTime)); + // now that we have the information visible via timeoutInfoHandlers, we try to remove the request id + final Transport.ResponseContext holder = responseHandlers.remove(requestId); + if (holder != null) { + assert holder.action().equals(action); + assert holder.connection().getNode().equals(node); + holder.handler() + .handleException( + new ReceiveTimeoutTransportException( + holder.connection().getNode(), + holder.action(), + "request_id [" + requestId + "] timed out after [" + (timeoutTime - sentTime) + "ms]" + ) + ); + } else { + // response was processed, remove timeout info. + timeoutInfoHandlers.remove(requestId); + } + } + } + + /** + * cancels timeout handling. this is a best effort only to avoid running it. remove the requestId from {@link #responseHandlers} + * to make sure this doesn't run. 
+ */ + public void cancel() { + assert responseHandlers.contains(requestId) == false : "cancel must be called after the requestId [" + + requestId + + "] has been removed from clientHandlers"; + if (cancellable != null) { + cancellable.cancel(); + } + } + + @Override + public String toString() { + return "timeout handler for [" + requestId + "][" + action + "]"; + } + + private void scheduleTimeout(TimeValue timeout) { + this.cancellable = threadPool.schedule(this, timeout, ThreadPool.Names.GENERIC); + } + } + + static class TimeoutInfoHolder { + + private final DiscoveryNode node; + private final String action; + private final long sentTime; + private final long timeoutTime; + + TimeoutInfoHolder(DiscoveryNode node, String action, long sentTime, long timeoutTime) { + this.node = node; + this.action = action; + this.sentTime = sentTime; + this.timeoutTime = timeoutTime; + } + + public DiscoveryNode node() { + return node; + } + + public String action() { + return action; + } + + public long sentTime() { + return sentTime; + } + + public long timeoutTime() { + return timeoutTime; + } + } + + public static final class ContextRestoreResponseHandler implements TransportResponseHandler { + + private final TransportResponseHandler delegate; + private final Supplier contextSupplier; + private volatile TimeoutHandler handler; + + public ContextRestoreResponseHandler(Supplier contextSupplier, TransportResponseHandler delegate) { + this.delegate = delegate; + this.contextSupplier = contextSupplier; + } + + @Override + public T read(StreamInput in) throws IOException { + return delegate.read(in); + } + + @Override + public void handleResponse(T response) { + if (handler != null) { + handler.cancel(); + } + try (ThreadContext.StoredContext ignore = contextSupplier.get()) { + delegate.handleResponse(response); + } + } + + @Override + public void handleException(TransportException exp) { + if (handler != null) { + handler.cancel(); + } + try (ThreadContext.StoredContext ignore = 
contextSupplier.get()) { + delegate.handleException(exp); + } + } + + @Override + public String executor() { + return delegate.executor(); + } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + + void setTimeoutHandler(TimeoutHandler handler) { + this.handler = handler; + } + + } + + private static final class DelegatingTransportMessageListener implements TransportMessageListener { + + private final List listeners = new CopyOnWriteArrayList<>(); + + @Override + public void onRequestReceived(long requestId, String action) { + for (TransportMessageListener listener : listeners) { + listener.onRequestReceived(requestId, action); + } + } + + @Override + public void onResponseSent(long requestId, String action, TransportResponse response) { + for (TransportMessageListener listener : listeners) { + listener.onResponseSent(requestId, action, response); + } + } + + @Override + public void onResponseSent(long requestId, String action, Exception error) { + for (TransportMessageListener listener : listeners) { + listener.onResponseSent(requestId, action, error); + } + } + + @Override + public void onRequestSent( + DiscoveryNode node, + long requestId, + String action, + TransportRequest request, + TransportRequestOptions finalOptions + ) { + for (TransportMessageListener listener : listeners) { + listener.onRequestSent(node, requestId, action, request, finalOptions); + } + } + + @Override + public void onResponseReceived(long requestId, transportservice.transport.Transport.ResponseContext holder) { + for (TransportMessageListener listener : listeners) { + listener.onResponseReceived(requestId, holder); + } + } + } + + static class DirectResponseChannel implements TransportChannel { + final DiscoveryNode localNode; + private final String action; + private final long requestId; + final TransportService service; + final ThreadPool threadPool; + + DirectResponseChannel(DiscoveryNode localNode, String action, long requestId, 
TransportService service, ThreadPool threadPool) { + this.localNode = localNode; + this.action = action; + this.requestId = requestId; + this.service = service; + this.threadPool = threadPool; + } + + @Override + public String getProfileName() { + return DIRECT_RESPONSE_PROFILE; + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + service.onResponseSent(requestId, action, response); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); + // ignore if its null, the service logs it + if (handler != null) { + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processResponse(handler, response); + } else { + threadPool.executor(executor).execute(new Runnable() { + @Override + public void run() { + processResponse(handler, response); + } + + @Override + public String toString() { + return "delivery of response to [" + requestId + "][" + action + "]: " + response; + } + }); + } + } + } + + @SuppressWarnings("unchecked") + protected void processResponse(TransportResponseHandler handler, TransportResponse response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + processException(handler, wrapInRemote(new ResponseHandlerFailureTransportException(e))); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + service.onResponseSent(requestId, action, exception); + final TransportResponseHandler handler = service.responseHandlers.onResponseReceived(requestId, service); + // ignore if its null, the service logs it + if (handler != null) { + final RemoteTransportException rtx = wrapInRemote(exception); + final String executor = handler.executor(); + if (ThreadPool.Names.SAME.equals(executor)) { + processException(handler, rtx); + } else { + threadPool.executor(handler.executor()).execute(new Runnable() { + @Override + public void run() { + processException(handler, rtx); + } + + 
@Override + public String toString() { + return "delivery of failure response to [" + requestId + "][" + action + "]: " + exception; + } + }); + } + } + } + + protected RemoteTransportException wrapInRemote(Exception e) { + if (e instanceof RemoteTransportException) { + return (RemoteTransportException) e; + } + return new RemoteTransportException(localNode.getName(), localNode.getAddress(), action, e); + } + + protected void processException(final TransportResponseHandler handler, final RemoteTransportException rtx) { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error( + () -> new ParameterizedMessage("failed to handle exception for action [{}], handler [{}]", action, handler), + e + ); + } + } + + @Override + public String getChannelType() { + return "direct"; + } + + @Override + public Version getVersion() { + return localNode.getVersion(); + } + } + + +} diff --git a/src/main/java/transportservice/action/ActionListener.java b/src/main/java/transportservice/action/ActionListener.java new file mode 100644 index 0000000..94d13d6 --- /dev/null +++ b/src/main/java/transportservice/action/ActionListener.java @@ -0,0 +1,357 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.action; + +import org.opensearch.ExceptionsHelper; +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.CheckedFunction; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.CheckedSupplier; + +import java.util.ArrayList; +import java.util.List; +import java.util.function.BiConsumer; +import java.util.function.Consumer; + +/** + * A listener for action responses or failures. + */ +public interface ActionListener { + /** + * Handle action response. This response may constitute a failure or a + * success but it is up to the listener to make that decision. + */ + void onResponse(Response response); + + /** + * A failure caused by an exception at some phase of the task. + */ + void onFailure(Exception e); + + /** + * Creates a listener that listens for a response (or failure) and executes the + * corresponding consumer when the response (or failure) is received. 
+ * + * @param onResponse the checked consumer of the response, when the listener receives one + * @param onFailure the consumer of the failure, when the listener receives one + * @param the type of the response + * @return a listener that listens for responses and invokes the consumer when received + */ + static ActionListener wrap( + CheckedConsumer onResponse, + Consumer onFailure + ) { + return new ActionListener() { + @Override + public void onResponse(Response response) { + try { + onResponse.accept(response); + } catch (Exception e) { + onFailure(e); + } + } + + @Override + public void onFailure(Exception e) { + onFailure.accept(e); + } + }; + } + + /** + * Creates a listener that delegates all responses it receives to another listener. + * + * @param delegate ActionListener to wrap and delegate any exception to + * @param bc BiConsumer invoked with delegate listener and exception + * @param Type of the listener + * @return Delegating listener + */ + static ActionListener delegateResponse(ActionListener delegate, BiConsumer, Exception> bc) { + return new ActionListener() { + + @Override + public void onResponse(T r) { + delegate.onResponse(r); + } + + @Override + public void onFailure(Exception e) { + bc.accept(delegate, e); + } + }; + } + + /** + * Creates a listener that delegates all exceptions it receives to another listener. 
+ * + * @param delegate ActionListener to wrap and delegate any exception to + * @param bc BiConsumer invoked with delegate listener and response + * @param Type of the delegating listener's response + * @param Type of the wrapped listeners + * @return Delegating listener + */ + static ActionListener delegateFailure(ActionListener delegate, BiConsumer, T> bc) { + return new ActionListener() { + + @Override + public void onResponse(T r) { + bc.accept(delegate, r); + } + + @Override + public void onFailure(Exception e) { + delegate.onFailure(e); + } + }; + } + + /** + * Creates a listener that listens for a response (or failure) and executes the + * corresponding runnable when the response (or failure) is received. + * + * @param runnable the runnable that will be called in event of success or failure + * @param the type of the response + * @return a listener that listens for responses and invokes the runnable when received + */ + static ActionListener wrap(Runnable runnable) { + return wrap(r -> runnable.run(), e -> runnable.run()); + } + + /** + * Creates a listener that wraps another listener, mapping response values via the given mapping function and passing along + * exceptions to the delegate. + * + * Notice that it is considered a bug if the listener's onResponse or onFailure fails. onResponse failures will not call onFailure. + * + * If the function fails, the listener's onFailure handler will be called. The principle is that the mapped listener will handle + * exceptions from the mapping function {@code fn} but it is the responsibility of {@code delegate} to handle its own exceptions + * inside `onResponse` and `onFailure`. 
+ * + * @param delegate Listener to delegate to + * @param fn Function to apply to listener response + * @param Response type of the new listener + * @param Response type of the wrapped listener + * @return a listener that maps the received response and then passes it to its delegate listener + */ + static ActionListener map(ActionListener delegate, CheckedFunction fn) { + return new ActionListener() { + @Override + public void onResponse(Response response) { + T mapped; + try { + mapped = fn.apply(response); + } catch (Exception e) { + onFailure(e); + return; + } + try { + delegate.onResponse(mapped); + } catch (RuntimeException e) { + assert false : new AssertionError("map: listener.onResponse failed", e); + throw e; + } + } + + @Override + public void onFailure(Exception e) { + try { + delegate.onFailure(e); + } catch (RuntimeException ex) { + if (ex != e) { + ex.addSuppressed(e); + } + assert false : new AssertionError("map: listener.onFailure failed", ex); + throw ex; + } + } + }; + } + + /** + * Converts a listener to a {@link BiConsumer} for compatibility with the {@link java.util.concurrent.CompletableFuture} + * api. + * + * @param listener that will be wrapped + * @param the type of the response + * @return a bi consumer that will complete the wrapped listener + */ + static BiConsumer toBiConsumer(ActionListener listener) { + return (response, throwable) -> { + if (throwable == null) { + listener.onResponse(response); + } else { + listener.onFailure(throwable); + } + }; + } + + /** + * Notifies every given listener with the response passed to {@link #onResponse(Object)}. If a listener itself throws an exception + * the exception is forwarded to {@link #onFailure(Exception)}. If in turn {@link #onFailure(Exception)} fails all remaining + * listeners will be processed and the caught exception will be re-thrown. 
+ */ + static void onResponse(Iterable> listeners, Response response) { + List exceptionList = new ArrayList<>(); + for (ActionListener listener : listeners) { + try { + listener.onResponse(response); + } catch (Exception ex) { + try { + listener.onFailure(ex); + } catch (Exception ex1) { + exceptionList.add(ex1); + } + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptionList); + } + + /** + * Notifies every given listener with the failure passed to {@link #onFailure(Exception)}. If a listener itself throws an exception + * all remaining listeners will be processed and the caught exception will be re-thrown. + */ + static void onFailure(Iterable> listeners, Exception failure) { + List exceptionList = new ArrayList<>(); + for (ActionListener listener : listeners) { + try { + listener.onFailure(failure); + } catch (Exception ex) { + exceptionList.add(ex); + } + } + ExceptionsHelper.maybeThrowRuntimeAndSuppress(exceptionList); + } + + /** + * Wraps a given listener and returns a new listener which executes the provided {@code runAfter} + * callback when the listener is notified via either {@code #onResponse} or {@code #onFailure}. + */ + static ActionListener runAfter(ActionListener delegate, Runnable runAfter) { + return new ActionListener() { + @Override + public void onResponse(Response response) { + try { + delegate.onResponse(response); + } finally { + runAfter.run(); + } + } + + @Override + public void onFailure(Exception e) { + try { + delegate.onFailure(e); + } finally { + runAfter.run(); + } + } + }; + } + + /** + * Wraps a given listener and returns a new listener which executes the provided {@code runBefore} + * callback before the listener is notified via either {@code #onResponse} or {@code #onFailure}. + * If the callback throws an exception then it will be passed to the listener's {@code #onFailure} and its {@code #onResponse} will + * not be executed. 
+ */ + static ActionListener runBefore(ActionListener delegate, CheckedRunnable runBefore) { + return new ActionListener() { + @Override + public void onResponse(Response response) { + try { + runBefore.run(); + } catch (Exception ex) { + delegate.onFailure(ex); + return; + } + delegate.onResponse(response); + } + + @Override + public void onFailure(Exception e) { + try { + runBefore.run(); + } catch (Exception ex) { + e.addSuppressed(ex); + } + delegate.onFailure(e); + } + }; + } + + /** + * Wraps a given listener and returns a new listener which makes sure {@link #onResponse(Object)} + * and {@link #onFailure(Exception)} of the provided listener will be called at most once. + */ +// static ActionListener notifyOnce(ActionListener delegate) { +// return new NotifyOnceListener() { +// @Override +// protected void innerOnResponse(Response response) { +// delegate.onResponse(response); +// } +// +// @Override +// protected void innerOnFailure(Exception e) { +// delegate.onFailure(e); +// } +// }; +// } + + /** + * Completes the given listener with the result from the provided supplier accordingly. + * This method is mainly used to complete a listener with a block of synchronous code. + * + * If the supplier fails, the listener's onFailure handler will be called. + * It is the responsibility of {@code delegate} to handle its own exceptions inside `onResponse` and `onFailure`. 
+ */ + static void completeWith(ActionListener listener, CheckedSupplier supplier) { + Response response; + try { + response = supplier.get(); + } catch (Exception e) { + try { + listener.onFailure(e); + } catch (RuntimeException ex) { + assert false : ex; + throw ex; + } + return; + } + try { + listener.onResponse(response); + } catch (RuntimeException ex) { + assert false : ex; + throw ex; + } + } +} diff --git a/src/main/java/transportservice/action/ActionListenerResponseHandler.java b/src/main/java/transportservice/action/ActionListenerResponseHandler.java new file mode 100644 index 0000000..ccb1a65 --- /dev/null +++ b/src/main/java/transportservice/action/ActionListenerResponseHandler.java @@ -0,0 +1,89 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.action; + +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TransportException; +import org.opensearch.transport.TransportResponse; +import org.opensearch.transport.TransportResponseHandler; + +import java.io.IOException; +import java.util.Objects; + +/** + * A simple base class for action response listeners, defaulting to using the SAME executor (as its + * very common on response handlers). + */ +public class ActionListenerResponseHandler implements TransportResponseHandler { + + private final ActionListener listener; + private final Writeable.Reader reader; + private final String executor; + + public ActionListenerResponseHandler(ActionListener listener, Writeable.Reader reader, String executor) { + this.listener = Objects.requireNonNull(listener); + this.reader = Objects.requireNonNull(reader); + this.executor = Objects.requireNonNull(executor); + } + + public ActionListenerResponseHandler(ActionListener listener, Writeable.Reader reader) { + this(listener, reader, ThreadPool.Names.SAME); + } + + @Override + public void handleResponse(Response response) { + listener.onResponse(response); + } + + @Override + public void handleException(TransportException e) { + listener.onFailure(e); + } + + @Override + public String executor() { + return executor; + } + + @Override + public Response read(StreamInput in) throws IOException { + return reader.read(in); + } + + @Override + public String toString() { + return super.toString() + "/" + listener; + } +} diff --git a/src/main/java/transportservice/action/ActionRunnable.java b/src/main/java/transportservice/action/ActionRunnable.java new file mode 100644 index 0000000..8959502 --- /dev/null +++ b/src/main/java/transportservice/action/ActionRunnable.java @@ -0,0 +1,103 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require 
contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.action; + +import org.opensearch.common.CheckedConsumer; +import org.opensearch.common.CheckedRunnable; +import org.opensearch.common.CheckedSupplier; +import org.opensearch.common.util.concurrent.AbstractRunnable; + +/** + * Base class for {@link Runnable}s that need to call {@link ActionListener#onFailure(Exception)} in case an uncaught + * exception or error is thrown while the actual action is run. + */ +public abstract class ActionRunnable extends AbstractRunnable { + + protected final ActionListener listener; + + /** + * Creates a {@link Runnable} that invokes the given listener with {@code null} after the given runnable has executed. 
+ * @param listener Listener to invoke + * @param runnable Runnable to execute + * @return Wrapped {@code Runnable} + */ + public static ActionRunnable run(ActionListener listener, CheckedRunnable runnable) { + return new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + runnable.run(); + listener.onResponse(null); + } + }; + } + + /** + * Creates a {@link Runnable} that invokes the given listener with the return of the given supplier. + * @param listener Listener to invoke + * @param supplier Supplier that provides value to pass to listener + * @return Wrapped {@code Runnable} + */ + public static ActionRunnable supply(ActionListener listener, CheckedSupplier supplier) { + return ActionRunnable.wrap(listener, l -> l.onResponse(supplier.get())); + } + + /** + * Creates a {@link Runnable} that wraps the given listener and a consumer of it that is executed when the {@link Runnable} is run. + * Invokes {@link ActionListener#onFailure(Exception)} on it if an exception is thrown on executing the consumer. + * @param listener ActionListener to wrap + * @param consumer Consumer of wrapped {@code ActionListener} + * @param Type of the given {@code ActionListener} + * @return Wrapped {@code Runnable} + */ + public static ActionRunnable wrap(ActionListener listener, CheckedConsumer, Exception> consumer) { + return new ActionRunnable(listener) { + @Override + protected void doRun() throws Exception { + consumer.accept(listener); + } + }; + } + + public ActionRunnable(ActionListener listener) { + this.listener = listener; + } + + /** + * Calls the action listeners {@link ActionListener#onFailure(Exception)} method with the given exception. 
+ * This method is invoked for all exception thrown by {@link #doRun()} + */ + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } +} diff --git a/src/main/java/transportservice/action/AdapterActionFuture.java b/src/main/java/transportservice/action/AdapterActionFuture.java new file mode 100644 index 0000000..5c66a9b --- /dev/null +++ b/src/main/java/transportservice/action/AdapterActionFuture.java @@ -0,0 +1,98 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.action; + +import org.opensearch.OpenSearchException; +import org.opensearch.action.ActionFuture; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.BaseFuture; +import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.UncategorizedExecutionException; + +import java.util.concurrent.TimeUnit; + +public abstract class AdapterActionFuture extends BaseFuture implements ActionFuture, ActionListener { + + @Override + public T actionGet() { + try { + return FutureUtils.get(this); + } catch (OpenSearchException e) { + throw unwrapEsException(e); + } + } + + @Override + public T actionGet(String timeout) { + return actionGet(TimeValue.parseTimeValue(timeout, null, getClass().getSimpleName() + ".actionGet.timeout")); + } + + @Override + public T actionGet(long timeoutMillis) { + return actionGet(timeoutMillis, TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(TimeValue timeout) { + return actionGet(timeout.millis(), TimeUnit.MILLISECONDS); + } + + @Override + public T actionGet(long timeout, TimeUnit unit) { + try { + return FutureUtils.get(this, timeout, unit); + } catch (OpenSearchException e) { + throw unwrapEsException(e); + } + } + + @Override + public void onResponse(L result) { + set(convert(result)); + } + + @Override + public void onFailure(Exception e) { + setException(e); + } + + protected abstract T convert(L listenerResponse); + + private static RuntimeException unwrapEsException(OpenSearchException esEx) { + Throwable root = esEx.unwrapCause(); + if (root instanceof RuntimeException) { + return (RuntimeException) root; + } + return new UncategorizedExecutionException("Failed execution", root); + } +} diff --git a/src/main/java/transportservice/action/ContextPreservingActionListener.java b/src/main/java/transportservice/action/ContextPreservingActionListener.java new file mode 100644 index 0000000..1661b31 --- /dev/null +++ 
b/src/main/java/transportservice/action/ContextPreservingActionListener.java @@ -0,0 +1,79 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.action; + + +import transportservice.common.ThreadContext; + +import java.util.function.Supplier; + +/** + * Restores the given {@link ThreadContext.StoredContext} + * once the listener is invoked + */ +public final class ContextPreservingActionListener implements ActionListener { + + private final ActionListener delegate; + private final Supplier context; + + public ContextPreservingActionListener(Supplier contextSupplier, ActionListener delegate) { + this.delegate = delegate; + this.context = contextSupplier; + } + + @Override + public void onResponse(R r) { + try (ThreadContext.StoredContext ignore = context.get()) { + delegate.onResponse(r); + } + } + + @Override + public void onFailure(Exception e) { + try (ThreadContext.StoredContext ignore = context.get()) { + delegate.onFailure(e); + } + } + + @Override + public String toString() { + return getClass().getName() + "/" + delegate.toString(); + } + + /** + * Wraps the provided action listener in a {@link ContextPreservingActionListener} that will + * also copy the response headers when the {@link ThreadContext.StoredContext} is closed + */ + public static ContextPreservingActionListener wrapPreservingContext(ActionListener listener, transportservice.common.ThreadContext threadContext) { + return new ContextPreservingActionListener<>(threadContext.newRestorableContext(true), listener); + } +} diff --git a/src/main/java/transportservice/action/PlainActionFuture.java b/src/main/java/transportservice/action/PlainActionFuture.java new file mode 100644 index 0000000..f17df7a --- /dev/null +++ b/src/main/java/transportservice/action/PlainActionFuture.java @@ -0,0 +1,53 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.action; + +import org.opensearch.common.CheckedConsumer; + +public class PlainActionFuture extends AdapterActionFuture { + + public static PlainActionFuture newFuture() { + return new PlainActionFuture<>(); + } + + public static T get(CheckedConsumer, E> e) throws E { + PlainActionFuture fut = newFuture(); + e.accept(fut); + return fut.actionGet(); + } + + @Override + protected T convert(T listenerResponse) { + return listenerResponse; + } +} diff --git a/src/main/java/transportservice/common/AbstractRunnable.java b/src/main/java/transportservice/common/AbstractRunnable.java new file mode 100644 index 0000000..c90988e --- /dev/null +++ b/src/main/java/transportservice/common/AbstractRunnable.java @@ -0,0 +1,84 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.common; + +/** + * An extension to runnable. + */ +public abstract class AbstractRunnable implements Runnable { + + /** + * Should the runnable force its execution in case it gets rejected? + */ + public boolean isForceExecution() { + return false; + } + + @Override + public final void run() { + try { + doRun(); + } catch (Exception t) { + onFailure(t); + } finally { + onAfter(); + } + } + + /** + * This method is called in a finally block after successful execution + * or on a rejection. + */ + public void onAfter() { + // nothing by default + } + + /** + * This method is invoked for all exception thrown by {@link #doRun()} + */ + public abstract void onFailure(Exception e); + + /** + * This should be executed if the thread-pool executing this action rejected the execution. 
+ * The default implementation forwards to {@link #onFailure(Exception)} + */ + public void onRejection(Exception e) { + onFailure(e); + } + + /** + * This method has the same semantics as {@link Runnable#run()} + * @throws InterruptedException if the run method throws an InterruptedException + */ + protected abstract void doRun() throws Exception; +} diff --git a/src/main/java/transportservice/common/ListenableFuture.java b/src/main/java/transportservice/common/ListenableFuture.java new file mode 100644 index 0000000..87e5085 --- /dev/null +++ b/src/main/java/transportservice/common/ListenableFuture.java @@ -0,0 +1,155 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.common; + +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.util.concurrent.BaseFuture; +import org.opensearch.common.util.concurrent.FutureUtils; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import transportservice.action.ActionListener; +import transportservice.action.ActionRunnable; +import transportservice.action.ContextPreservingActionListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +/** + * A future implementation that allows for the result to be passed to listeners waiting for + * notification. This is useful for cases where a computation is requested many times + * concurrently, but really only needs to be performed a single time. Once the computation + * has been performed the registered listeners will be notified by submitting a runnable + * for execution in the provided {@link ExecutorService}. If the computation has already + * been performed, a request to add a listener will simply result in execution of the listener + * on the calling thread. + */ +public final class ListenableFuture extends BaseFuture implements ActionListener { + + private volatile boolean done = false; + private final List, ExecutorService>> listeners = new ArrayList<>(); + + /** + * Adds a listener to this future. If the future has not yet completed, the listener will be + * notified of a response or exception in a runnable submitted to the ExecutorService provided. + * If the future has completed, the listener will be notified immediately without forking to + * a different thread. + */ + public void addListener(transportservice.action.ActionListener listener, ExecutorService executor) { + addListener(listener, executor, null); + } + + /** + * Adds a listener to this future. 
If the future has not yet completed, the listener will be + * notified of a response or exception in a runnable submitted to the ExecutorService provided. + * If the future has completed, the listener will be notified immediately without forking to + * a different thread. + * + * It will apply the provided ThreadContext (if not null) when executing the listening. + */ + public void addListener(ActionListener listener, ExecutorService executor, ThreadContext threadContext) { + if (done) { + // run the callback directly, we don't hold the lock and don't need to fork! + notifyListener(listener, OpenSearchExecutors.newDirectExecutorService()); + } else { + final boolean run; + // check done under lock since it could have been modified and protect modifications + // to the list under lock + synchronized (this) { + if (done) { + run = true; + } else { + final ActionListener wrappedListener; + if (threadContext == null) { + wrappedListener = listener; + } else { + wrappedListener = ContextPreservingActionListener.wrapPreservingContext(listener, threadContext); + } + listeners.add(new Tuple<>(wrappedListener, executor)); + run = false; + } + } + + if (run) { + // run the callback directly, we don't hold the lock and don't need to fork! + notifyListener(listener, OpenSearchExecutors.newDirectExecutorService()); + } + } + } + + @Override + protected synchronized void done(boolean ignored) { + done = true; + listeners.forEach(t -> notifyListener(t.v1(), t.v2())); + // release references to any listeners as we no longer need them and will live + // much longer than the listeners in most cases + listeners.clear(); + } + + private void notifyListener(transportservice.action.ActionListener listener, ExecutorService executorService) { + try { + executorService.execute(new ActionRunnable(listener) { + @Override + protected void doRun() { + // call get in a non-blocking fashion as we could be on a network thread + // or another thread like the scheduler, which we should never block! 
+ V value = FutureUtils.get(ListenableFuture.this, 0L, TimeUnit.NANOSECONDS); + listener.onResponse(value); + } + + @Override + public String toString() { + return "ListenableFuture notification"; + } + }); + } catch (Exception e) { + listener.onFailure(e); + } + } + + @Override + public void onResponse(V v) { + final boolean set = set(v); + if (set == false) { + throw new IllegalStateException("did not set value, value or exception already set?"); + } + } + + @Override + public void onFailure(Exception e) { + final boolean set = setException(e); + if (set == false) { + throw new IllegalStateException("did not set exception, value already set or exception already set?"); + } + } +} diff --git a/src/main/java/transportservice/common/ThreadContext.java b/src/main/java/transportservice/common/ThreadContext.java new file mode 100644 index 0000000..71a7671 --- /dev/null +++ b/src/main/java/transportservice/common/ThreadContext.java @@ -0,0 +1,835 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.common; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.opensearch.action.support.ContextPreservingActionListener; +import org.opensearch.client.OriginSettingClient; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.collect.Tuple; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.io.stream.Writeable; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Setting.Property; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.util.concurrent.WrappedRunnable; +import org.opensearch.http.HttpTransportSettings; +import org.opensearch.tasks.Task; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.*; +import java.util.function.BiConsumer; +import java.util.function.BinaryOperator; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collector; +import java.util.stream.Stream; + +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT; +import static org.opensearch.http.HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE; + +/** + * A ThreadContext is a map of string headers and a transient map of keyed objects that are associated with + * a thread. It allows to store and retrieve header information across method calls, network calls as well as threads spawned from a + * thread that has a {@link ThreadContext} associated with. Threads spawned from a {@link org.opensearch.threadpool.ThreadPool} + * have out of the box support for {@link ThreadContext} and all threads spawned will inherit the {@link ThreadContext} from the thread + * that it is forking from.". 
Network calls will also preserve the sender's headers automatically. + *

+ * Consumers of ThreadContext usually don't need to interact with adding or stashing contexts. Every opensearch thread is managed by + * a thread pool or executor being responsible for stashing and restoring the thread's context. For instance if a network request is + * received, all headers are deserialized from the network and directly added as the headers of the thread's {@link ThreadContext} + * (see {@link #readHeaders(StreamInput)}). In order to not modify the context that is currently active on this thread the network code + * uses a try/with pattern to stash its current context, read headers into a fresh one and once the request is handled or a handler thread + * is forked (which in turn inherits the context) it restores the previous context. For instance: + *

+ *
+ *     // current context is stashed and replaced with a default context
+ *     try (StoredContext context = threadContext.stashContext()) {
+ *         threadContext.readHeaders(in); // read headers into current context
+ *         if (fork) {
+ *             threadPool.execute(() -> request.handle()); // inherits context
+ *         } else {
+ *             request.handle();
+ *         }
+ *     }
+ *     // previous context is restored on StoredContext#close()
+ * 
+ * + */ +public final class ThreadContext implements Writeable { + + public static final String PREFIX = "request.headers"; + public static final Setting DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope); + + /** + * Name for the {@link #stashWithOrigin origin} attribute. + */ + public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; + + private static final Logger logger = LogManager.getLogger(ThreadContext.class); + private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(); + private final Map defaultHeader; + private final ThreadLocal threadLocal; + private final int maxWarningHeaderCount; + private final long maxWarningHeaderSize; + + /** + * Creates a new ThreadContext instance + * @param settings the settings to read the default request headers from + */ + public ThreadContext(Settings settings) { + this.defaultHeader = buildDefaultHeaders(settings); + this.threadLocal = ThreadLocal.withInitial(() -> DEFAULT_CONTEXT); + this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings); + this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes(); + } + + /** + * Removes the current context and resets a default context. The removed context can be + * restored by closing the returned {@link StoredContext}. + */ + public StoredContext stashContext() { + final ThreadContextStruct context = threadLocal.get(); + /** + * X-Opaque-ID should be preserved in a threadContext in order to propagate this across threads. + * This is needed so the DeprecationLogger in another thread can see the value of X-Opaque-ID provided by a user. + * Otherwise when context is stash, it should be empty. 
+ */ + if (context.requestHeaders.containsKey(Task.X_OPAQUE_ID)) { + ThreadContextStruct threadContextStruct = DEFAULT_CONTEXT.putHeaders( + MapBuilder.newMapBuilder() + .put(Task.X_OPAQUE_ID, context.requestHeaders.get(Task.X_OPAQUE_ID)) + .immutableMap() + ); + threadLocal.set(threadContextStruct); + } else { + threadLocal.set(DEFAULT_CONTEXT); + } + return () -> { + // If the node and thus the threadLocal get closed while this task + // is still executing, we don't want this runnable to fail with an + // uncaught exception + threadLocal.set(context); + }; + } + + /** + * Captures the current thread context as writeable, allowing it to be serialized out later + */ + public Writeable captureAsWriteable() { + final ThreadContextStruct context = threadLocal.get(); + return out -> context.writeTo(out, defaultHeader); + } + + /** + * Removes the current context and resets a default context marked with as + * originating from the supplied string. The removed context can be + * restored by closing the returned {@link StoredContext}. Callers should + * be careful to save the current context before calling this method and + * restore it any listeners, likely with + * {@link ContextPreservingActionListener}. Use {@link OriginSettingClient} + * which can be used to do this automatically. + *

+ * Without security the origin is ignored, but security uses it to authorize + * actions that are made up of many sub-actions. These actions call + * {@link #stashWithOrigin} before performing on behalf of a user that + * should be allowed even if the user doesn't have permission to perform + * those actions on their own. + *

+ * For example, a user might not have permission to GET from the tasks index + * but the tasks API will perform a get on their behalf using this method + * if it can't find the task in memory. + */ + public StoredContext stashWithOrigin(String origin) { + final StoredContext storedContext = stashContext(); + putTransient(ACTION_ORIGIN_TRANSIENT_NAME, origin); + return storedContext; + } + + /** + * Removes the current context and resets a new context that contains a merge of the current headers and the given headers. + * The removed context can be restored when closing the returned {@link StoredContext}. The merge strategy is that headers + * that are already existing are preserved unless they are defaults. + */ + public StoredContext stashAndMergeHeaders(Map headers) { + final ThreadContextStruct context = threadLocal.get(); + Map newHeader = new HashMap<>(headers); + newHeader.putAll(context.requestHeaders); + threadLocal.set(DEFAULT_CONTEXT.putHeaders(newHeader)); + return () -> threadLocal.set(context); + } + + /** + * Just like {@link #stashContext()} but no default context is set. + * @param preserveResponseHeaders if set to true the response headers of the restore thread will be preserved. + */ + public StoredContext newStoredContext(boolean preserveResponseHeaders) { + return newStoredContext(preserveResponseHeaders, Collections.emptyList()); + } + + /** + * Just like {@link #stashContext()} but no default context is set. Instead, the {@code transientHeadersToClear} argument can be used + * to clear specific transient headers in the new context. All headers (with the possible exception of {@code responseHeaders}) are + * restored by closing the returned {@link StoredContext}. + * + * @param preserveResponseHeaders if set to true the response headers of the restore thread will be preserved. 
+ */ + public StoredContext newStoredContext(boolean preserveResponseHeaders, Collection transientHeadersToClear) { + final ThreadContextStruct originalContext = threadLocal.get(); + // clear specific transient headers from the current context + Map newTransientHeaders = null; + for (String transientHeaderToClear : transientHeadersToClear) { + if (originalContext.transientHeaders.containsKey(transientHeaderToClear)) { + if (newTransientHeaders == null) { + newTransientHeaders = new HashMap<>(originalContext.transientHeaders); + } + newTransientHeaders.remove(transientHeaderToClear); + } + } + if (newTransientHeaders != null) { + ThreadContextStruct threadContextStruct = new ThreadContextStruct( + originalContext.requestHeaders, + originalContext.responseHeaders, + newTransientHeaders, + originalContext.isSystemContext, + originalContext.warningHeadersSize + ); + threadLocal.set(threadContextStruct); + } + // this is the context when this method returns + final ThreadContextStruct newContext = threadLocal.get(); + return () -> { + if (preserveResponseHeaders && threadLocal.get() != newContext) { + threadLocal.set(originalContext.putResponseHeaders(threadLocal.get().responseHeaders)); + } else { + threadLocal.set(originalContext); + } + }; + } + + /** + * Returns a supplier that gathers a {@link #newStoredContext(boolean)} and restores it once the + * returned supplier is invoked. The context returned from the supplier is a stored version of the + * suppliers callers context that should be restored once the originally gathered context is not needed anymore. + * For instance this method should be used like this: + * + *

+     *     Supplier<ThreadContext.StoredContext> restorable = context.newRestorableContext(true);
+     *     new Thread() {
+     *         public void run() {
+     *             try (ThreadContext.StoredContext ctx = restorable.get()) {
+     *                 // execute with the parents context and restore the threads context afterwards
+     *             }
+     *         }
+     *
+     *     }.start();
+     * 
+ * + * @param preserveResponseHeaders if set to true the response headers of the restore thread will be preserved. + * @return a restorable context supplier + */ + public Supplier newRestorableContext(boolean preserveResponseHeaders) { + return wrapRestorable(newStoredContext(preserveResponseHeaders)); + } + + /** + * Same as {@link #newRestorableContext(boolean)} but wraps an existing context to restore. + * @param storedContext the context to restore + */ + public Supplier wrapRestorable(StoredContext storedContext) { + return () -> { + StoredContext context = newStoredContext(false); + storedContext.restore(); + return context; + }; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + threadLocal.get().writeTo(out, defaultHeader); + } + + /** + * Reads the headers from the stream into the current context + */ + public void readHeaders(StreamInput in) throws IOException { + setHeaders(readHeadersFromStream(in)); + } + + public void setHeaders(Tuple, Map>> headerTuple) { + final Map requestHeaders = headerTuple.v1(); + final Map> responseHeaders = headerTuple.v2(); + final ThreadContextStruct struct; + if (requestHeaders.isEmpty() && responseHeaders.isEmpty()) { + struct = ThreadContextStruct.EMPTY; + } else { + struct = new ThreadContextStruct(requestHeaders, responseHeaders, Collections.emptyMap(), false); + } + threadLocal.set(struct); + } + + public static Tuple, Map>> readHeadersFromStream(StreamInput in) throws IOException { + final Map requestHeaders = in.readMap(StreamInput::readString, StreamInput::readString); + final Map> responseHeaders = in.readMap(StreamInput::readString, input -> { + final int size = input.readVInt(); + if (size == 0) { + return Collections.emptySet(); + } else if (size == 1) { + return Collections.singleton(input.readString()); + } else { + // use a linked hash set to preserve order + final LinkedHashSet values = new LinkedHashSet<>(size); + for (int i = 0; i < size; i++) { + final String value = 
input.readString(); + final boolean added = values.add(value); + assert added : value; + } + return values; + } + }); + return new Tuple<>(requestHeaders, responseHeaders); + } + + /** + * Returns the header for the given key or null if not present + */ + public String getHeader(String key) { + String value = threadLocal.get().requestHeaders.get(key); + if (value == null) { + return defaultHeader.get(key); + } + return value; + } + + /** + * Returns all of the request headers from the thread's context.
+ * Be advised, headers might contain credentials. + * In order to avoid storing, and erroneously exposing, such headers, + * it is recommended to instead store security headers that prove + * the credentials have been verified successfully, and which are + * internal to the system, in the sense that they cannot be sent + * by the clients. + */ + public Map getHeaders() { + HashMap map = new HashMap<>(defaultHeader); + map.putAll(threadLocal.get().requestHeaders); + return Collections.unmodifiableMap(map); + } + + /** + * Returns the request headers, without the default headers + */ + public Map getRequestHeadersOnly() { + return Collections.unmodifiableMap(new HashMap<>(threadLocal.get().requestHeaders)); + } + + /** + * Get a copy of all response headers. + * + * @return Never {@code null}. + */ + public Map> getResponseHeaders() { + Map> responseHeaders = threadLocal.get().responseHeaders; + HashMap> map = new HashMap<>(responseHeaders.size()); + + for (Map.Entry> entry : responseHeaders.entrySet()) { + map.put(entry.getKey(), Collections.unmodifiableList(new ArrayList<>(entry.getValue()))); + } + + return Collections.unmodifiableMap(map); + } + + /** + * Copies all header key, value pairs into the current context + */ + public void copyHeaders(Iterable> headers) { + threadLocal.set(threadLocal.get().copyHeaders(headers)); + } + + /** + * Puts a header into the context + */ + public void putHeader(String key, String value) { + threadLocal.set(threadLocal.get().putRequest(key, value)); + } + + /** + * Puts all of the given headers into this context + */ + public void putHeader(Map header) { + threadLocal.set(threadLocal.get().putHeaders(header)); + } + + /** + * Puts a transient header object into this context + */ + public void putTransient(String key, Object value) { + threadLocal.set(threadLocal.get().putTransient(key, value)); + } + + /** + * Returns a transient header object or null if there is no header for the given key + */ + 
@SuppressWarnings("unchecked") // (T)object + public T getTransient(String key) { + return (T) threadLocal.get().transientHeaders.get(key); + } + + /** + * Add the {@code value} for the specified {@code key} Any duplicate {@code value} is ignored. + * + * @param key the header name + * @param value the header value + */ + public void addResponseHeader(final String key, final String value) { + addResponseHeader(key, value, v -> v); + } + + /** + * Add the {@code value} for the specified {@code key} with the specified {@code uniqueValue} used for de-duplication. Any duplicate + * {@code value} after applying {@code uniqueValue} is ignored. + * + * @param key the header name + * @param value the header value + * @param uniqueValue the function that produces de-duplication values + */ + public void addResponseHeader(final String key, final String value, final Function uniqueValue) { + threadLocal.set(threadLocal.get().putResponse(key, value, uniqueValue, maxWarningHeaderCount, maxWarningHeaderSize)); + } + + /** + * Saves the current thread context and wraps command in a Runnable that restores that context before running command. If + * command has already been passed through this method then it is returned unaltered rather than wrapped twice. + */ + public Runnable preserveContext(Runnable command) { + if (command instanceof ContextPreservingAbstractRunnable) { + return command; + } + if (command instanceof ContextPreservingRunnable) { + return command; + } + if (command instanceof AbstractRunnable) { + return new ContextPreservingAbstractRunnable((transportservice.common.AbstractRunnable) command); + } + return new ContextPreservingRunnable(command); + } + + /** + * Unwraps a command that was previously wrapped by {@link #preserveContext(Runnable)}. 
+ */ + public Runnable unwrap(Runnable command) { + if (command instanceof WrappedRunnable) { + return ((WrappedRunnable) command).unwrap(); + } + return command; + } + + /** + * Returns true if the current context is the default context. + */ + boolean isDefaultContext() { + return threadLocal.get() == DEFAULT_CONTEXT; + } + + /** + * Marks this thread context as an internal system context. This signals that actions in this context are issued + * by the system itself rather than by a user action. + */ + public void markAsSystemContext() { + threadLocal.set(threadLocal.get().setSystemContext()); + } + + /** + * Returns true iff this context is a system context + */ + public boolean isSystemContext() { + return threadLocal.get().isSystemContext; + } + + @FunctionalInterface + public interface StoredContext extends AutoCloseable { + @Override + void close(); + + default void restore() { + close(); + } + } + + public static Map buildDefaultHeaders(Settings settings) { + Settings headers = DEFAULT_HEADERS_SETTING.get(settings); + if (headers == null) { + return Collections.emptyMap(); + } else { + Map defaultHeader = new HashMap<>(); + for (String key : headers.names()) { + defaultHeader.put(key, headers.get(key)); + } + return Collections.unmodifiableMap(defaultHeader); + } + } + + private static final class ThreadContextStruct { + + private static final ThreadContextStruct EMPTY = new ThreadContextStruct( + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + false + ); + + private final Map requestHeaders; + private final Map transientHeaders; + private final Map> responseHeaders; + private final boolean isSystemContext; + // saving current warning headers' size not to recalculate the size with every new warning header + private final long warningHeadersSize; + + private ThreadContextStruct setSystemContext() { + if (isSystemContext) { + return this; + } + return new ThreadContextStruct(requestHeaders, responseHeaders, transientHeaders, 
true); + } + + private ThreadContextStruct( + Map requestHeaders, + Map> responseHeaders, + Map transientHeaders, + boolean isSystemContext + ) { + this.requestHeaders = requestHeaders; + this.responseHeaders = responseHeaders; + this.transientHeaders = transientHeaders; + this.isSystemContext = isSystemContext; + this.warningHeadersSize = 0L; + } + + private ThreadContextStruct( + Map requestHeaders, + Map> responseHeaders, + Map transientHeaders, + boolean isSystemContext, + long warningHeadersSize + ) { + this.requestHeaders = requestHeaders; + this.responseHeaders = responseHeaders; + this.transientHeaders = transientHeaders; + this.isSystemContext = isSystemContext; + this.warningHeadersSize = warningHeadersSize; + } + + /** + * This represents the default context and it should only ever be called by {@link #DEFAULT_CONTEXT}. + */ + private ThreadContextStruct() { + this(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap(), false); + } + + private ThreadContextStruct putRequest(String key, String value) { + Map newRequestHeaders = new HashMap<>(this.requestHeaders); + putSingleHeader(key, value, newRequestHeaders); + return new ThreadContextStruct(newRequestHeaders, responseHeaders, transientHeaders, isSystemContext); + } + + private static void putSingleHeader(String key, T value, Map newHeaders) { + if (newHeaders.putIfAbsent(key, value) != null) { + throw new IllegalArgumentException("value for key [" + key + "] already present"); + } + } + + private ThreadContextStruct putHeaders(Map headers) { + if (headers.isEmpty()) { + return this; + } else { + final Map newHeaders = new HashMap<>(this.requestHeaders); + for (Map.Entry entry : headers.entrySet()) { + putSingleHeader(entry.getKey(), entry.getValue(), newHeaders); + } + return new ThreadContextStruct(newHeaders, responseHeaders, transientHeaders, isSystemContext); + } + } + + private ThreadContextStruct putResponseHeaders(Map> headers) { + assert headers != null; + if 
(headers.isEmpty()) { + return this; + } + final Map> newResponseHeaders = new HashMap<>(this.responseHeaders); + for (Map.Entry> entry : headers.entrySet()) { + String key = entry.getKey(); + final Set existingValues = newResponseHeaders.get(key); + if (existingValues != null) { + final Set newValues = Stream.concat(entry.getValue().stream(), existingValues.stream()) + .collect(LINKED_HASH_SET_COLLECTOR); + newResponseHeaders.put(key, Collections.unmodifiableSet(newValues)); + } else { + newResponseHeaders.put(key, entry.getValue()); + } + } + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext); + } + + private ThreadContextStruct putResponse( + final String key, + final String value, + final Function uniqueValue, + final int maxWarningHeaderCount, + final long maxWarningHeaderSize + ) { + assert value != null; + long newWarningHeaderSize = warningHeadersSize; + // check if we can add another warning header - if max size within limits + if (key.equals("Warning") && (maxWarningHeaderSize != -1)) { // if size is NOT unbounded, check its limits + if (warningHeadersSize > maxWarningHeaderSize) { // if max size has already been reached before + logger.warn( + "Dropping a warning header, as their total size reached the maximum allowed of [" + + maxWarningHeaderSize + + "] bytes set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + + "]!" + ); + return this; + } + newWarningHeaderSize += "Warning".getBytes(StandardCharsets.UTF_8).length + value.getBytes(StandardCharsets.UTF_8).length; + if (newWarningHeaderSize > maxWarningHeaderSize) { + logger.warn( + "Dropping a warning header, as their total size reached the maximum allowed of [" + + maxWarningHeaderSize + + "] bytes set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_SIZE.getKey() + + "]!" 
+ ); + return new ThreadContextStruct( + requestHeaders, + responseHeaders, + transientHeaders, + isSystemContext, + newWarningHeaderSize + ); + } + } + + final Map> newResponseHeaders; + final Set existingValues = responseHeaders.get(key); + if (existingValues != null) { + if (existingValues.contains(uniqueValue.apply(value))) { + return this; + } + // preserve insertion order + final Set newValues = Stream.concat(existingValues.stream(), Stream.of(value)).collect(LINKED_HASH_SET_COLLECTOR); + newResponseHeaders = new HashMap<>(responseHeaders); + newResponseHeaders.put(key, Collections.unmodifiableSet(newValues)); + } else { + newResponseHeaders = new HashMap<>(responseHeaders); + newResponseHeaders.put(key, Collections.singleton(value)); + } + + // check if we can add another warning header - if max count within limits + if ((key.equals("Warning")) && (maxWarningHeaderCount != -1)) { // if count is NOT unbounded, check its limits + final int warningHeaderCount = newResponseHeaders.containsKey("Warning") ? newResponseHeaders.get("Warning").size() : 0; + if (warningHeaderCount > maxWarningHeaderCount) { + logger.warn( + "Dropping a warning header, as their total count reached the maximum allowed of [" + + maxWarningHeaderCount + + "] set in [" + + HttpTransportSettings.SETTING_HTTP_MAX_WARNING_HEADER_COUNT.getKey() + + "]!" 
+ ); + return this; + } + } + return new ThreadContextStruct(requestHeaders, newResponseHeaders, transientHeaders, isSystemContext, newWarningHeaderSize); + } + + private ThreadContextStruct putTransient(String key, Object value) { + Map newTransient = new HashMap<>(this.transientHeaders); + putSingleHeader(key, value, newTransient); + return new ThreadContextStruct(requestHeaders, responseHeaders, newTransient, isSystemContext); + } + + private ThreadContextStruct copyHeaders(Iterable> headers) { + Map newHeaders = new HashMap<>(); + for (Map.Entry header : headers) { + newHeaders.put(header.getKey(), header.getValue()); + } + return putHeaders(newHeaders); + } + + private void writeTo(StreamOutput out, Map defaultHeaders) throws IOException { + final Map requestHeaders; + if (defaultHeaders.isEmpty()) { + requestHeaders = this.requestHeaders; + } else { + requestHeaders = new HashMap<>(defaultHeaders); + requestHeaders.putAll(this.requestHeaders); + } + + out.writeVInt(requestHeaders.size()); + for (Map.Entry entry : requestHeaders.entrySet()) { + out.writeString(entry.getKey()); + out.writeString(entry.getValue()); + } + + out.writeMap(responseHeaders, StreamOutput::writeString, StreamOutput::writeStringCollection); + } + } + + /** + * Wraps a Runnable to preserve the thread context. + */ + private class ContextPreservingRunnable implements WrappedRunnable { + private final Runnable in; + private final StoredContext ctx; + + private ContextPreservingRunnable(Runnable in) { + ctx = newStoredContext(false); + this.in = in; + } + + @Override + public void run() { + try (StoredContext ignore = stashContext()) { + ctx.restore(); + in.run(); + } + } + + @Override + public String toString() { + return in.toString(); + } + + @Override + public Runnable unwrap() { + return in; + } + } + + /** + * Wraps an AbstractRunnable to preserve the thread context. 
+ */ + private class ContextPreservingAbstractRunnable extends transportservice.common.AbstractRunnable implements WrappedRunnable { + private final transportservice.common.AbstractRunnable in; + private final StoredContext creatorsContext; + + private StoredContext threadsOriginalContext = null; + + private ContextPreservingAbstractRunnable(transportservice.common.AbstractRunnable in) { + creatorsContext = newStoredContext(false); + this.in = in; + } + + @Override + public boolean isForceExecution() { + return in.isForceExecution(); + } + + @Override + public void onAfter() { + try { + in.onAfter(); + } finally { + if (threadsOriginalContext != null) { + threadsOriginalContext.restore(); + } + } + } + + @Override + public void onFailure(Exception e) { + in.onFailure(e); + } + + @Override + public void onRejection(Exception e) { + in.onRejection(e); + } + + @Override + protected void doRun() throws Exception { + threadsOriginalContext = stashContext(); + creatorsContext.restore(); + in.doRun(); + } + + @Override + public String toString() { + return in.toString(); + } + + @Override + public transportservice.common.AbstractRunnable unwrap() { + return in; + } + } + + private static final Collector, Set> LINKED_HASH_SET_COLLECTOR = new LinkedHashSetCollector<>(); + + private static class LinkedHashSetCollector implements Collector, Set> { + @Override + public Supplier> supplier() { + return LinkedHashSet::new; + } + + @Override + public BiConsumer, T> accumulator() { + return Set::add; + } + + @Override + public BinaryOperator> combiner() { + return (left, right) -> { + left.addAll(right); + return left; + }; + } + + @Override + public Function, Set> finisher() { + return Function.identity(); + } + + private static final Set CHARACTERISTICS = Collections.unmodifiableSet( + EnumSet.of(Characteristics.IDENTITY_FINISH) + ); + + @Override + public Set characteristics() { + return CHARACTERISTICS; + } + } + +} diff --git 
a/src/main/java/transportservice/component/AbstractLifecycleComponent.java b/src/main/java/transportservice/component/AbstractLifecycleComponent.java new file mode 100644 index 0000000..a98c9d0 --- /dev/null +++ b/src/main/java/transportservice/component/AbstractLifecycleComponent.java @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.component; + +import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.component.LifecycleListener; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; + +public abstract class AbstractLifecycleComponent implements LifecycleComponent { + + protected final org.opensearch.common.component.Lifecycle lifecycle = new org.opensearch.common.component.Lifecycle(); + + private final List listeners = new CopyOnWriteArrayList<>(); + + protected AbstractLifecycleComponent() {} + + @Override + public Lifecycle.State lifecycleState() { + return this.lifecycle.state(); + } + + @Override + public void addLifecycleListener(LifecycleListener listener) { + listeners.add(listener); + } + + @Override + public void removeLifecycleListener(LifecycleListener listener) { + listeners.remove(listener); + } + + @Override + public void start() { + synchronized (lifecycle) { + if (!lifecycle.canMoveToStarted()) { + return; + } + for (LifecycleListener listener : listeners) { + listener.beforeStart(); + } + doStart(); + lifecycle.moveToStarted(); + for (LifecycleListener listener : listeners) { + listener.afterStart(); + } + } + } + + protected abstract void doStart(); + + @Override + public void stop() { + synchronized (lifecycle) { + if (!lifecycle.canMoveToStopped()) { + return; + } + for (LifecycleListener listener : listeners) { + listener.beforeStop(); + } + lifecycle.moveToStopped(); + doStop(); + for (LifecycleListener listener : listeners) { + listener.afterStop(); + } + } + } + + protected abstract void doStop(); + + @Override + public void close() { + synchronized (lifecycle) { + if (lifecycle.started()) { + stop(); + } + if (!lifecycle.canMoveToClosed()) { + return; + } + for (LifecycleListener listener : listeners) { + listener.beforeClose(); + } + lifecycle.moveToClosed(); + 
try { + doClose(); + } catch (IOException e) { + throw new UncheckedIOException(e); + } finally { + for (LifecycleListener listener : listeners) { + listener.afterClose(); + } + } + } + } + + protected abstract void doClose() throws IOException; +} diff --git a/src/main/java/transportservice/component/Lifecycle.java b/src/main/java/transportservice/component/Lifecycle.java new file mode 100644 index 0000000..105ac25 --- /dev/null +++ b/src/main/java/transportservice/component/Lifecycle.java @@ -0,0 +1,210 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.component; + +/** + * Lifecycle state. Allows the following transitions: + *
    + *
+ * <ul>
+ * <li>INITIALIZED -&gt; STARTED, STOPPED, CLOSED</li>
+ * <li>STARTED -&gt; STOPPED</li>
+ * <li>STOPPED -&gt; STARTED, CLOSED</li>
+ * <li>CLOSED -&gt; </li>
+ * </ul>
+ * <p>

+ * Also allows to stay in the same state. For example, when calling stop on a component, the
+ * following logic can be applied:
+ * <pre>
+ * public void stop() {
+ *  if (!lifecycleState.moveToStopped()) {
+ *      return;
+ *  }
+ * // continue with stop logic
+ * }
+ * </pre>
+ * <p>

+ * NOTE: The Lifecycle class is thread-safe. It is also possible to prevent concurrent state transitions
+ * by locking on the Lifecycle object itself. This is typically useful when chaining multiple transitions.
+ * <p>
+ * Note, closed is only allowed to be called when stopped, so make sure to stop the component first.
+ * Here is how the logic can be applied. A lock of the {@code lifecycleState} object is taken so that
+ * another thread cannot move the state from {@code STOPPED} to {@code STARTED} before it has moved to
+ * {@code CLOSED}.
+ * <pre>
+ * public void close() {
+ *  synchronized (lifecycleState) {
+ *      if (lifecycleState.started()) {
+ *          stop();
+ *      }
+ *      if (!lifecycleState.moveToClosed()) {
+ *          return;
+ *      }
+ *  }
+ *  // perform close logic here
+ * }
+ * </pre>
+ */ +public class Lifecycle { + + public enum State { + INITIALIZED, + STOPPED, + STARTED, + CLOSED + } + + private volatile State state = State.INITIALIZED; + + public State state() { + return this.state; + } + + /** + * Returns {@code true} if the state is initialized. + */ + public boolean initialized() { + return state == State.INITIALIZED; + } + + /** + * Returns {@code true} if the state is started. + */ + public boolean started() { + return state == State.STARTED; + } + + /** + * Returns {@code true} if the state is stopped. + */ + public boolean stopped() { + return state == State.STOPPED; + } + + /** + * Returns {@code true} if the state is closed. + */ + public boolean closed() { + return state == State.CLOSED; + } + + public boolean stoppedOrClosed() { + State state = this.state; + return state == State.STOPPED || state == State.CLOSED; + } + + public boolean canMoveToStarted() throws IllegalStateException { + State localState = this.state; + if (localState == State.INITIALIZED || localState == State.STOPPED) { + return true; + } + if (localState == State.STARTED) { + return false; + } + if (localState == State.CLOSED) { + throw new IllegalStateException("Can't move to started state when closed"); + } + throw new IllegalStateException("Can't move to started with unknown state"); + } + + public synchronized boolean moveToStarted() throws IllegalStateException { + State localState = this.state; + if (localState == State.INITIALIZED || localState == State.STOPPED) { + state = State.STARTED; + return true; + } + if (localState == State.STARTED) { + return false; + } + if (localState == State.CLOSED) { + throw new IllegalStateException("Can't move to started state when closed"); + } + throw new IllegalStateException("Can't move to started with unknown state"); + } + + public boolean canMoveToStopped() throws IllegalStateException { + State localState = state; + if (localState == State.STARTED) { + return true; + } + if (localState == State.INITIALIZED || 
localState == State.STOPPED) { + return false; + } + if (localState == State.CLOSED) { + throw new IllegalStateException("Can't move to stopped state when closed"); + } + throw new IllegalStateException("Can't move to stopped with unknown state"); + } + + public synchronized boolean moveToStopped() throws IllegalStateException { + State localState = state; + if (localState == State.STARTED) { + state = State.STOPPED; + return true; + } + if (localState == State.INITIALIZED || localState == State.STOPPED) { + return false; + } + if (localState == State.CLOSED) { + throw new IllegalStateException("Can't move to stopped state when closed"); + } + throw new IllegalStateException("Can't move to stopped with unknown state"); + } + + public boolean canMoveToClosed() throws IllegalStateException { + State localState = state; + if (localState == State.CLOSED) { + return false; + } + if (localState == State.STARTED) { + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); + } + return true; + } + + public synchronized boolean moveToClosed() throws IllegalStateException { + State localState = state; + if (localState == State.CLOSED) { + return false; + } + if (localState == State.STARTED) { + throw new IllegalStateException("Can't move to closed before moving to stopped mode"); + } + state = State.CLOSED; + return true; + } + + @Override + public String toString() { + return state.toString(); + } + +} diff --git a/src/main/java/transportservice/component/LifecycleComponent.java b/src/main/java/transportservice/component/LifecycleComponent.java new file mode 100644 index 0000000..85a0ab4 --- /dev/null +++ b/src/main/java/transportservice/component/LifecycleComponent.java @@ -0,0 +1,50 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
+ */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.component; + +import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.component.LifecycleListener; +import org.opensearch.common.lease.Releasable; + +public interface LifecycleComponent extends Releasable { + + Lifecycle.State lifecycleState(); + + void addLifecycleListener(org.opensearch.common.component.LifecycleListener listener); + + void removeLifecycleListener(LifecycleListener listener); + + void start(); + + void stop(); +} diff --git a/src/main/java/transportservice/component/LifecycleListener.java b/src/main/java/transportservice/component/LifecycleListener.java new file mode 100644 index 0000000..f4415a9 --- /dev/null +++ b/src/main/java/transportservice/component/LifecycleListener.java @@ -0,0 +1,60 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.component; + +public abstract class LifecycleListener { + + public void beforeStart() { + + } + + public void afterStart() { + + } + + public void beforeStop() { + + } + + public void afterStop() { + + } + + public void beforeClose() { + + } + + public void afterClose() { + + } +} diff --git a/src/main/java/transportservice/netty4/Netty.java b/src/main/java/transportservice/netty4/Netty.java new file mode 100644 index 0000000..53ab395 --- /dev/null +++ b/src/main/java/transportservice/netty4/Netty.java @@ -0,0 +1,332 @@ +package transportservice.netty4; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.*; +import io.netty.channel.socket.nio.NioChannelOption; +import io.netty.util.AttributeKey; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.ExceptionsHelper; +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.SuppressForbidden; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import 
org.opensearch.common.lease.Releasables; +import org.opensearch.common.network.NetworkService; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.ByteSizeUnit; +import org.opensearch.common.unit.ByteSizeValue; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.common.util.concurrent.OpenSearchExecutors; +import org.opensearch.core.internal.net.NetUtils; +import org.opensearch.indices.breaker.CircuitBreakerService; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.TcpChannel; +import org.opensearch.transport.TcpServerChannel; +import org.opensearch.transport.TransportSettings; +import transportservice.*; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.net.SocketOption; +import java.util.Map; +import java.util.Set; + +import static org.opensearch.common.settings.Setting.byteSizeSetting; +import static org.opensearch.common.util.concurrent.ConcurrentCollections.newConcurrentMap; + +public class Netty extends TcpTransport { + + private volatile SharedGroupFactory.SharedGroup sharedGroup; + private final SharedGroupFactory sharedGroupFactory; + private final RecvByteBufAllocator recvByteBufAllocator; + private static final Logger logger = LogManager.getLogger(Netty.class); + private final ByteSizeValue receivePredictorMin; + private final ByteSizeValue receivePredictorMax; + private volatile Bootstrap clientBootstrap; + private final Map serverBootstraps = newConcurrentMap(); + protected Set profileSettings; + public static final Setting NETTY_RECEIVE_PREDICTOR_SIZE = Setting.byteSizeSetting( + "transportservice.transport.netty.receive_predictor_size", + new ByteSizeValue(64, ByteSizeUnit.KB), + Setting.Property.NodeScope + ); + public static final Setting NETTY_RECEIVE_PREDICTOR_MIN = byteSizeSetting( + "transportservice.transport.netty.receive_predictor_min", + NETTY_RECEIVE_PREDICTOR_SIZE, + 
Setting.Property.NodeScope + ); + public static final Setting NETTY_RECEIVE_PREDICTOR_MAX = byteSizeSetting( + "transportservice.transport.netty.receive_predictor_max", + NETTY_RECEIVE_PREDICTOR_SIZE, + Setting.Property.NodeScope + ); + + public static final Setting WORKER_COUNT = new Setting<>( + "transport.netty.worker_count", + (s) -> Integer.toString(OpenSearchExecutors.allocatedProcessors(s)), + (s) -> Setting.parseInt(s, 1, "transport.netty.worker_count"), + Setting.Property.NodeScope + ); + + public Netty( + Settings settings, + Version version, + ThreadPool threadPool, + NetworkService networkService, + PageCacheRecycler pageCacheRecycler, + NamedWriteableRegistry namedWriteableRegistry, + CircuitBreakerService circuitBreakerService, + SharedGroupFactory sharedGroupFactory + ) { + super(settings, version, threadPool, pageCacheRecycler, circuitBreakerService, namedWriteableRegistry, networkService); + Netty4Utils.setAvailableProcessors(OpenSearchExecutors.NODE_PROCESSORS_SETTING.get(settings)); + NettyAllocator.logAllocatorDescriptionIfNeeded(); + this.sharedGroupFactory = sharedGroupFactory; + this.profileSettings = getProfileSettings(Settings.builder().put("transport.profiles.test.port", "5555").put("transport.profiles.default.port", "3333").build()); + // See AdaptiveReceiveBufferSizePredictor#DEFAULT_XXX for default values in netty..., we can use higher ones for us, even fixed one + this.receivePredictorMin = NETTY_RECEIVE_PREDICTOR_MIN.get(settings); + this.receivePredictorMax = NETTY_RECEIVE_PREDICTOR_MAX.get(settings); + if (receivePredictorMax.getBytes() == receivePredictorMin.getBytes()) { + recvByteBufAllocator = new FixedRecvByteBufAllocator((int) receivePredictorMax.getBytes()); + } else { + recvByteBufAllocator = new AdaptiveRecvByteBufAllocator( + (int) receivePredictorMin.getBytes(), + (int) receivePredictorMin.getBytes(), + (int) receivePredictorMax.getBytes() + ); + } + } + + @Override + protected void doStart() { + boolean success = false; 
+ try { + sharedGroup = sharedGroupFactory.getTransportGroup(); + clientBootstrap = createClientBootstrap(sharedGroup); + if (NetworkService.NETWORK_SERVER.get(settings)) { + for (ProfileSettings profileSettings : this.profileSettings) { + createServerBootstrap(profileSettings, sharedGroup); + bindServer(profileSettings); + } + } + super.doStart(); + success = true; + } finally { + if (success == false) { + doStop(); + } + } + } + + @Override + @SuppressForbidden(reason = "debug") + protected void stopInternal() { + Releasables.close(() -> { + if (sharedGroup != null) { + sharedGroup.shutdown(); + } + }, serverBootstraps::clear, () -> clientBootstrap = null); + } + + protected ChannelHandler getServerChannelInitializer(String name) { + return new ServerChannelInitializer(name); + } + + // server + private void createServerBootstrap(ProfileSettings profileSettings, SharedGroupFactory.SharedGroup sharedGroup) { + String name = profileSettings.profileName; + if (logger.isDebugEnabled()) { + logger.debug( + "using profile[{}], worker_count[{}], port[{}], bind_host[{}], publish_host[{}], receive_predictor[{}->{}]", + name, + sharedGroupFactory.getTransportWorkerCount(), + profileSettings.portOrRange, + profileSettings.bindHosts, + profileSettings.publishHosts, + receivePredictorMin, + receivePredictorMax + ); + } + + final ServerBootstrap serverBootstrap = new ServerBootstrap(); + + serverBootstrap.group(sharedGroup.getLowLevelGroup()); + + // NettyAllocator will return the channel type designed to work with the configuredAllocator + serverBootstrap.channel(NettyAllocator.getServerChannelType()); + + // Set the allocators for both the server channel and the child channels created + serverBootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + serverBootstrap.childOption(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + + serverBootstrap.childHandler(getServerChannelInitializer(name)); + serverBootstrap.handler(new 
ServerChannelExceptionHandler()); + + serverBootstrap.childOption(ChannelOption.TCP_NODELAY, profileSettings.tcpNoDelay); + serverBootstrap.childOption(ChannelOption.SO_KEEPALIVE, profileSettings.tcpKeepAlive); + if (profileSettings.tcpKeepAlive) { + // Note that transportservice.netty4.Netty logs a warning if it can't set the option + if (profileSettings.tcpKeepIdle >= 0) { + final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); + if (keepIdleOption != null) { + serverBootstrap.childOption(NioChannelOption.of(keepIdleOption), profileSettings.tcpKeepIdle); + } + } + if (profileSettings.tcpKeepInterval >= 0) { + final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); + if (keepIntervalOption != null) { + serverBootstrap.childOption(NioChannelOption.of(keepIntervalOption), profileSettings.tcpKeepInterval); + } + + } + if (profileSettings.tcpKeepCount >= 0) { + final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); + if (keepCountOption != null) { + serverBootstrap.childOption(NioChannelOption.of(keepCountOption), profileSettings.tcpKeepCount); + } + } + } + + if (profileSettings.sendBufferSize.getBytes() != -1) { + serverBootstrap.childOption(ChannelOption.SO_SNDBUF, Math.toIntExact(profileSettings.sendBufferSize.getBytes())); + } + + if (profileSettings.receiveBufferSize.getBytes() != -1) { + serverBootstrap.childOption(ChannelOption.SO_RCVBUF, Math.toIntExact(profileSettings.receiveBufferSize.bytesAsInt())); + } + + serverBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + serverBootstrap.childOption(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + serverBootstrap.option(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); + serverBootstrap.childOption(ChannelOption.SO_REUSEADDR, profileSettings.reuseAddress); + serverBootstrap.validate(); + + serverBootstraps.put(name, serverBootstrap); + } + + + @Override + protected TcpServerChannel 
bind(String s, InetSocketAddress inetSocketAddress) throws IOException { + Channel channel = serverBootstraps.get(s).bind(inetSocketAddress).syncUninterruptibly().channel(); + Netty4TcpServerChannel esChannel = new Netty4TcpServerChannel(channel); + channel.attr(SERVER_CHANNEL_KEY).set(esChannel); + return esChannel; + } + + + // client + private Bootstrap createClientBootstrap(SharedGroupFactory.SharedGroup sharedGroup) { + final Bootstrap bootstrap = new Bootstrap(); + bootstrap.group(sharedGroup.getLowLevelGroup()); + + // NettyAllocator will return the channel type designed to work with the configured allocator + assert Netty4NioSocketChannel.class.isAssignableFrom(NettyAllocator.getChannelType()); + bootstrap.channel(NettyAllocator.getChannelType()); + bootstrap.option(ChannelOption.ALLOCATOR, NettyAllocator.getAllocator()); + + bootstrap.option(ChannelOption.TCP_NODELAY, TransportSettings.TCP_NO_DELAY.get(settings)); + bootstrap.option(ChannelOption.SO_KEEPALIVE, TransportSettings.TCP_KEEP_ALIVE.get(settings)); + if (TransportSettings.TCP_KEEP_ALIVE.get(settings)) { + // Note that transportservice.Netty logs a warning if it can't set the option + if (TransportSettings.TCP_KEEP_IDLE.get(settings) >= 0) { + final SocketOption keepIdleOption = NetUtils.getTcpKeepIdleSocketOptionOrNull(); + if (keepIdleOption != null) { + bootstrap.option(NioChannelOption.of(keepIdleOption), TransportSettings.TCP_KEEP_IDLE.get(settings)); + } + } + if (TransportSettings.TCP_KEEP_INTERVAL.get(settings) >= 0) { + final SocketOption keepIntervalOption = NetUtils.getTcpKeepIntervalSocketOptionOrNull(); + if (keepIntervalOption != null) { + bootstrap.option(NioChannelOption.of(keepIntervalOption), TransportSettings.TCP_KEEP_INTERVAL.get(settings)); + } + } + if (TransportSettings.TCP_KEEP_COUNT.get(settings) >= 0) { + final SocketOption keepCountOption = NetUtils.getTcpKeepCountSocketOptionOrNull(); + if (keepCountOption != null) { + 
bootstrap.option(NioChannelOption.of(keepCountOption), TransportSettings.TCP_KEEP_COUNT.get(settings)); + } + } + } + + final ByteSizeValue tcpSendBufferSize = TransportSettings.TCP_SEND_BUFFER_SIZE.get(settings); + if (tcpSendBufferSize.getBytes() > 0) { + bootstrap.option(ChannelOption.SO_SNDBUF, Math.toIntExact(tcpSendBufferSize.getBytes())); + } + + final ByteSizeValue tcpReceiveBufferSize = TransportSettings.TCP_RECEIVE_BUFFER_SIZE.get(settings); + if (tcpReceiveBufferSize.getBytes() > 0) { + bootstrap.option(ChannelOption.SO_RCVBUF, Math.toIntExact(tcpReceiveBufferSize.getBytes())); + } + + bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, recvByteBufAllocator); + + final boolean reuseAddress = TransportSettings.TCP_REUSE_ADDRESS.get(settings); + bootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress); + + return bootstrap; + } + + static final AttributeKey SERVER_CHANNEL_KEY = AttributeKey.newInstance("es-server-channel"); + static final AttributeKey CHANNEL_KEY = AttributeKey.newInstance("es-channel"); + + @ChannelHandler.Sharable + private class ServerChannelExceptionHandler extends ChannelInboundHandlerAdapter { + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + Netty4TcpServerChannel serverChannel = ctx.channel().attr(SERVER_CHANNEL_KEY).get(); + if (cause instanceof Error) { + onServerException(serverChannel, new Exception(cause)); + } else { + onServerException(serverChannel, (Exception) cause); + } + } + } + + private void addClosedExceptionLogger(Channel channel) { + channel.closeFuture().addListener(f -> { + if (f.isSuccess() == false) { + logger.debug(() -> new ParameterizedMessage("exception while closing channel: {}", channel), f.cause()); + } + }); + } + + + + // Another class + protected class ServerChannelInitializer extends ChannelInitializer { + + protected final String name; + private final NettyByteBufSizer sizer = new NettyByteBufSizer(); + + 
protected ServerChannelInitializer(String name) { + this.name = name; + } + + @Override + protected void initChannel(Channel ch) throws Exception { + addClosedExceptionLogger(ch); + assert ch instanceof Netty4NioSocketChannel; + NetUtils.tryEnsureReasonableKeepAliveConfig(((Netty4NioSocketChannel) ch).javaChannel()); + Netty4TcpChannel nettyTcpChannel = new Netty4TcpChannel(ch, true, name, ch.newSucceededFuture()); + ch.attr(CHANNEL_KEY).set(nettyTcpChannel); + ch.pipeline().addLast("byte_buf_sizer", sizer); + ch.pipeline().addLast("logging", new transportservice.netty4.OpenSearchLoggingHandler()); + ch.pipeline().addLast("dispatcher", new transportservice.netty4.Netty4MessageChannelHandler(pageCacheRecycler, Netty.this)); + serverAcceptedChannel(nettyTcpChannel); + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ExceptionsHelper.maybeDieOnAnotherThread(cause); + super.exceptionCaught(ctx, cause); + } + } + +} diff --git a/src/main/java/transportservice/netty4/Netty4MessageChannelHandler.java b/src/main/java/transportservice/netty4/Netty4MessageChannelHandler.java new file mode 100644 index 0000000..7209b49 --- /dev/null +++ b/src/main/java/transportservice/netty4/Netty4MessageChannelHandler.java @@ -0,0 +1,223 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.netty4; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.*; +import org.opensearch.ExceptionsHelper; +import org.opensearch.OpenSearchException; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.Transports; +import transportservice.transport.InboundPipeline; +import transportservice.transport.Transport; + +import java.nio.channels.ClosedChannelException; +import java.util.ArrayDeque; +import java.util.Queue; + +/** + * A handler (must be the last one!) that does size based frame decoding and forwards the actual message + * to the relevant action. 
+ */ +final class Netty4MessageChannelHandler extends ChannelDuplexHandler { + + private final Netty transport; + + private final Queue queuedWrites = new ArrayDeque<>(); + + private WriteOperation currentWrite; + private final InboundPipeline pipeline; + + + Netty4MessageChannelHandler(PageCacheRecycler recycler, Netty transport) { + this.transport = transport; + final ThreadPool threadPool = transport.getThreadPool(); + final Transport.RequestHandlers requestHandlers = transport.getRequestHandlers(); + this.pipeline = new InboundPipeline( + transport.getVersion(), + transport.getStatsTracker(), + recycler, + threadPool::relativeTimeInMillis, + transport.getInflightBreaker(), + requestHandlers::getHandler, + transport::inboundMessage + ); + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + assert Transports.assertTransportThread(); + assert msg instanceof ByteBuf : "Expected message type ByteBuf, found: " + msg.getClass(); + + final ByteBuf buffer = (ByteBuf) msg; + Netty4TcpChannel channel = ctx.channel().attr(Netty.CHANNEL_KEY).get(); + final BytesReference wrapped = Netty4Utils.toBytesReference(buffer); + System.out.println("MESSAGE RECEIVED:" + wrapped.utf8ToString()); + try (ReleasableBytesReference reference = new ReleasableBytesReference(wrapped, buffer::release)) { + pipeline.handleBytes(channel, reference); + } + } + + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + ExceptionsHelper.maybeDieOnAnotherThread(cause); + final Throwable unwrapped = ExceptionsHelper.unwrap(cause, OpenSearchException.class); + final Throwable newCause = unwrapped != null ? 
unwrapped : cause; + Netty4TcpChannel tcpChannel = ctx.channel().attr(Netty.CHANNEL_KEY).get(); + if (newCause instanceof Error) { + transport.onException(tcpChannel, new Exception(newCause)); + } else { + transport.onException(tcpChannel, (Exception) newCause); + } + } + + @Override + public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { + assert msg instanceof ByteBuf; + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + final boolean queued = queuedWrites.offer(new WriteOperation((ByteBuf) msg, promise)); + assert queued; + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + } + + @Override + public void channelWritabilityChanged(ChannelHandlerContext ctx) { + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + if (ctx.channel().isWritable()) { + doFlush(ctx); + } + ctx.fireChannelWritabilityChanged(); + } + + @Override + public void flush(ChannelHandlerContext ctx) { + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + Channel channel = ctx.channel(); + if (channel.isWritable() || channel.isActive() == false) { + doFlush(ctx); + } + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + assert Transports.assertDefaultThreadContext(transport.getThreadPool().getThreadContext()); + doFlush(ctx); +// Releasables.closeWhileHandlingException(pipeline); + super.channelInactive(ctx); + } + + private void doFlush(ChannelHandlerContext ctx) { + assert ctx.executor().inEventLoop(); + final Channel channel = ctx.channel(); + if (channel.isActive() == false) { + if (currentWrite != null) { + currentWrite.promise.tryFailure(new ClosedChannelException()); + } + failQueuedWrites(); + return; + } + while (channel.isWritable()) { + if (currentWrite == null) { + currentWrite = queuedWrites.poll(); + } + if (currentWrite == null) { + break; + } + 
final WriteOperation write = currentWrite; + if (write.buf.readableBytes() == 0) { + write.promise.trySuccess(); + currentWrite = null; + continue; + } + final int readableBytes = write.buf.readableBytes(); + final int bufferSize = Math.min(readableBytes, 1 << 18); + final int readerIndex = write.buf.readerIndex(); + final boolean sliced = readableBytes != bufferSize; + final ByteBuf writeBuffer; + if (sliced) { + writeBuffer = write.buf.retainedSlice(readerIndex, bufferSize); + write.buf.readerIndex(readerIndex + bufferSize); + } else { + writeBuffer = write.buf; + } + final ChannelFuture writeFuture = ctx.write(writeBuffer); + if (sliced == false || write.buf.readableBytes() == 0) { + currentWrite = null; + writeFuture.addListener(future -> { + assert ctx.executor().inEventLoop(); + if (future.isSuccess()) { + write.promise.trySuccess(); + } else { + write.promise.tryFailure(future.cause()); + } + }); + } else { + writeFuture.addListener(future -> { + assert ctx.executor().inEventLoop(); + if (future.isSuccess() == false) { + write.promise.tryFailure(future.cause()); + } + }); + } + ctx.flush(); + if (channel.isActive() == false) { + failQueuedWrites(); + return; + } + } + } + + private void failQueuedWrites() { + WriteOperation queuedWrite; + while ((queuedWrite = queuedWrites.poll()) != null) { + queuedWrite.promise.tryFailure(new ClosedChannelException()); + } + } + + private static final class WriteOperation { + + private final ByteBuf buf; + + private final ChannelPromise promise; + + WriteOperation(ByteBuf buf, ChannelPromise promise) { + this.buf = buf; + this.promise = promise; + } + } +} diff --git a/src/main/java/transportservice/netty4/Netty4Plugin.java b/src/main/java/transportservice/netty4/Netty4Plugin.java new file mode 100644 index 0000000..ad53f47 --- /dev/null +++ b/src/main/java/transportservice/netty4/Netty4Plugin.java @@ -0,0 +1,56 @@ +//package transportservice.netty4; +// +//import org.apache.lucene.util.SetOnce; +//import 
org.opensearch.Version; +//import org.opensearch.common.io.stream.NamedWriteableRegistry; +//import org.opensearch.common.network.NetworkService; +//import org.opensearch.common.settings.Settings; +//import org.opensearch.common.util.PageCacheRecycler; +//import org.opensearch.indices.breaker.CircuitBreakerService; +//import org.opensearch.threadpool.ThreadPool; +//import transportservice.SharedGroupFactory; +// +//import java.util.Collections; +//import java.util.Map; +// +//public class Netty4Plugin { +// +// public static final String NETTY_TRANSPORT_NAME = "netty4"; +// private final SetOnce groupFactory = new SetOnce<>(); +// +// public Map getTransports( +// Settings settings, +// ThreadPool threadPool, +// PageCacheRecycler pageCacheRecycler, +// CircuitBreakerService circuitBreakerService, +// NamedWriteableRegistry namedWriteableRegistry, +// NetworkService networkService +// ) { +// return Collections.singletonMap( +// NETTY_TRANSPORT_NAME, +// () -> new Netty( +// settings, +// Version.CURRENT, +// threadPool, +// networkService, +// pageCacheRecycler, +// namedWriteableRegistry, +// circuitBreakerService, +// getSharedGroupFactory(settings) +// ) +// ); +// } +// +// private SharedGroupFactory getSharedGroupFactory(Settings settings) { +// SharedGroupFactory groupFactory = this.groupFactory.get(); +// if (groupFactory != null) { +// assert groupFactory.getSettings().equals(settings) : "Different settings than originally provided"; +// return groupFactory; +// } else { +// this.groupFactory.set(new SharedGroupFactory(settings)); +// return this.groupFactory.get(); +// } +// } +// +// +//} diff --git a/src/main/java/netty4/Netty4TcpChannel.java b/src/main/java/transportservice/netty4/Netty4TcpChannel.java similarity index 98% rename from src/main/java/netty4/Netty4TcpChannel.java rename to src/main/java/transportservice/netty4/Netty4TcpChannel.java index 3d32172..a2bd93a 100644 --- a/src/main/java/netty4/Netty4TcpChannel.java +++ 
b/src/main/java/transportservice/netty4/Netty4TcpChannel.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package netty4; +package transportservice.netty4; import io.netty.channel.Channel; import io.netty.channel.ChannelFuture; @@ -40,8 +40,8 @@ import org.opensearch.common.Nullable; import org.opensearch.common.bytes.BytesReference; import org.opensearch.common.concurrent.CompletableContext; -import org.opensearch.transport.TcpChannel; import org.opensearch.transport.TransportException; +import transportservice.transport.TcpChannel; import java.net.InetSocketAddress; diff --git a/src/main/java/netty4/Netty4TcpServerChannel.java b/src/main/java/transportservice/netty4/Netty4TcpServerChannel.java similarity index 98% rename from src/main/java/netty4/Netty4TcpServerChannel.java rename to src/main/java/transportservice/netty4/Netty4TcpServerChannel.java index c57a793..12e8ec3 100644 --- a/src/main/java/netty4/Netty4TcpServerChannel.java +++ b/src/main/java/transportservice/netty4/Netty4TcpServerChannel.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package netty4; +package transportservice.netty4; import io.netty.channel.Channel; import org.opensearch.action.ActionListener; diff --git a/src/main/java/netty4/Netty4Utils.java b/src/main/java/transportservice/netty4/Netty4Utils.java similarity index 95% rename from src/main/java/netty4/Netty4Utils.java rename to src/main/java/transportservice/netty4/Netty4Utils.java index 2326757..a5ea528 100644 --- a/src/main/java/netty4/Netty4Utils.java +++ b/src/main/java/transportservice/netty4/Netty4Utils.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package netty4; +package transportservice.netty4; import io.netty.buffer.ByteBuf; import io.netty.buffer.CompositeByteBuf; @@ -54,7 +54,7 @@ public class Netty4Utils { private static final AtomicBoolean isAvailableProcessorsSet = new AtomicBoolean(); /** - * Set the number of available processors that Netty uses for sizing various resources (e.g., thread pools). + * Set the number of available processors that transportservice.netty4.Netty uses for sizing various resources (e.g., thread pools). * * @param availableProcessors the number of available processors * @throws IllegalStateException if available processors was set previously and the specified value does not match the already-set value @@ -75,7 +75,7 @@ public static void setAvailableProcessors(final int availableProcessors) { } else if (availableProcessors != NettyRuntime.availableProcessors()) { /* * We have previously set the available processors yet either we are trying to set it to a different value now or there is a bug - * in Netty and our previous value did not take, bail. + * in transportservice.Netty and our previous value did not take, bail. */ final String message = String.format( Locale.ROOT, diff --git a/src/main/java/netty4/OpenSearchLoggingHandler.java b/src/main/java/transportservice/netty4/OpenSearchLoggingHandler.java similarity index 97% rename from src/main/java/netty4/OpenSearchLoggingHandler.java rename to src/main/java/transportservice/netty4/OpenSearchLoggingHandler.java index 627e91a..0e55d99 100644 --- a/src/main/java/netty4/OpenSearchLoggingHandler.java +++ b/src/main/java/transportservice/netty4/OpenSearchLoggingHandler.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package netty4; +package transportservice.netty4; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.logging.LogLevel; diff --git a/src/main/java/transport/BytesTransportRequest.java b/src/main/java/transportservice/transport/BytesTransportRequest.java similarity index 98% rename from src/main/java/transport/BytesTransportRequest.java rename to src/main/java/transportservice/transport/BytesTransportRequest.java index 580c3fc..4f4a754 100644 --- a/src/main/java/transport/BytesTransportRequest.java +++ b/src/main/java/transportservice/transport/BytesTransportRequest.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package transport; +package transportservice.transport; import org.opensearch.Version; import org.opensearch.common.bytes.BytesReference; diff --git a/src/main/java/transportservice/transport/ClusterConnectionManager.java b/src/main/java/transportservice/transport/ClusterConnectionManager.java new file mode 100644 index 0000000..ae6fe98 --- /dev/null +++ b/src/main/java/transportservice/transport/ClusterConnectionManager.java @@ -0,0 +1,287 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + * + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. 
package transportservice.transport;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.settings.Settings;
import org.opensearch.common.util.concurrent.*;
import org.opensearch.core.internal.io.IOUtils;
import org.opensearch.transport.*;
import transportservice.common.ListenableFuture;
import transportservice.action.ActionListener;

import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicBoolean;

/**
 * This class manages node connections within a cluster. The connection is opened by the underlying transport.
 * Once the connection is opened, this class manages the connection. This includes closing the connection when
 * the connection manager is closed.
 */
public class ClusterConnectionManager implements ConnectionManager {

    private static final Logger logger = LogManager.getLogger(ClusterConnectionManager.class);

    // Nodes with a fully established and validated connection.
    private final ConcurrentMap<DiscoveryNode, Transport.Connection> connectedNodes = ConcurrentCollections.newConcurrentMap();
    // In-flight connection attempts; concurrent callers for the same node piggyback on the pending future.
    private final ConcurrentMap<DiscoveryNode, ListenableFuture<Void>> pendingConnections = ConcurrentCollections.newConcurrentMap();
    // Ref-counts in-flight connect attempts; when the manager closes and the count drops to zero,
    // closeInternal() tears down every tracked connection and releases the close latch.
    private final AbstractRefCounted connectingRefCounter = new AbstractRefCounted("connection manager") {
        @Override
        protected void closeInternal() {
            Iterator<Map.Entry<DiscoveryNode, Transport.Connection>> iterator = connectedNodes.entrySet().iterator();
            while (iterator.hasNext()) {
                Map.Entry<DiscoveryNode, Transport.Connection> next = iterator.next();
                try {
                    IOUtils.closeWhileHandlingException(next.getValue());
                } finally {
                    iterator.remove();
                }
            }
            closeLatch.countDown();
        }
    };
    private final Transport transport;
    private final ConnectionProfile defaultProfile;
    private final AtomicBoolean closing = new AtomicBoolean(false);
    private final CountDownLatch closeLatch = new CountDownLatch(1);
    private final DelegatingNodeConnectionListener connectionListener = new DelegatingNodeConnectionListener();

    public ClusterConnectionManager(Settings settings, Transport transport) {
        this(ConnectionProfile.buildDefaultConnectionProfile(settings), transport);
    }

    public ClusterConnectionManager(ConnectionProfile connectionProfile, Transport transport) {
        this.transport = transport;
        this.defaultProfile = connectionProfile;
    }

    @Override
    public void addListener(TransportConnectionListener listener) {
        this.connectionListener.addListener(listener);
    }

    @Override
    public void removeListener(TransportConnectionListener listener) {
        this.connectionListener.removeListener(listener);
    }

    /**
     * Opens a new connection to the given node without registering it with this manager;
     * the caller owns the returned connection's lifecycle.
     */
    @Override
    public void openConnection(DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener<Transport.Connection> listener) {
        ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile);
        internalOpenConnection(node, resolvedProfile, listener);
    }

    /**
     * Connects to a node with the given connection profile. If the node is already connected this method has no effect.
     * Once a connection is successfully established, it can be validated before being exposed.
     * The ActionListener will be called on the calling thread or the generic thread pool.
     */
    @Override
    public void connectToNode(
        DiscoveryNode node,
        ConnectionProfile connectionProfile,
        ConnectionValidator connectionValidator,
        ActionListener<Void> listener
    ) throws ConnectTransportException {
        ConnectionProfile resolvedProfile = ConnectionProfile.resolveConnectionProfile(connectionProfile, defaultProfile);
        if (node == null) {
            listener.onFailure(new ConnectTransportException(null, "can't connect to a null node"));
            return;
        }

        if (connectingRefCounter.tryIncRef() == false) {
            listener.onFailure(new IllegalStateException("connection manager is closed"));
            return;
        }

        if (connectedNodes.containsKey(node)) {
            connectingRefCounter.decRef();
            listener.onResponse(null);
            return;
        }

        final ListenableFuture<Void> currentListener = new ListenableFuture<>();
        final ListenableFuture<Void> existingListener = pendingConnections.putIfAbsent(node, currentListener);
        if (existingListener != null) {
            try {
                // wait on previous entry to complete connection attempt
                existingListener.addListener(listener, OpenSearchExecutors.newDirectExecutorService());
            } finally {
                connectingRefCounter.decRef();
            }
            return;
        }

        currentListener.addListener(listener, OpenSearchExecutors.newDirectExecutorService());

        // decRef exactly once regardless of which success/failure path runs
        final RunOnce releaseOnce = new RunOnce(connectingRefCounter::decRef);
        internalOpenConnection(node, resolvedProfile, ActionListener.wrap(conn -> {
            connectionValidator.validate(conn, resolvedProfile, ActionListener.wrap(ignored -> {
                assert Transports.assertNotTransportThread("connection validator success");
                try {
                    if (connectedNodes.putIfAbsent(node, conn) != null) {
                        // another attempt won the race; discard ours
                        logger.debug("existing connection to node [{}], closing new redundant connection", node);
                        IOUtils.closeWhileHandlingException(conn);
                    } else {
                        logger.debug("connected to node [{}]", node);
                        try {
                            connectionListener.onNodeConnected(node, conn);
                        } finally {
                            final Transport.Connection finalConnection = conn;
                            conn.addCloseListener(ActionListener.wrap(() -> {
                                logger.trace("unregistering {} after connection close and marking as disconnected", node);
                                connectedNodes.remove(node, finalConnection);
                                connectionListener.onNodeDisconnected(node, conn);
                            }));
                        }
                    }
                } finally {
                    ListenableFuture<Void> future = pendingConnections.remove(node);
                    assert future == currentListener : "Listener in pending map is different than the expected listener";
                    releaseOnce.run();
                    future.onResponse(null);
                }
            }, e -> {
                assert Transports.assertNotTransportThread("connection validator failure");
                IOUtils.closeWhileHandlingException(conn);
                failConnectionListeners(node, releaseOnce, e, currentListener);
            }));
        }, e -> {
            assert Transports.assertNotTransportThread("internalOpenConnection failure");
            failConnectionListeners(node, releaseOnce, e, currentListener);
        }));
    }

    /**
     * Returns a connection for the given node if the node is connected.
     * Connections returned from this method must not be closed. The lifecycle of this connection is
     * maintained by this connection manager
     *
     * @throws NodeNotConnectedException if the node is not connected
     * @see #connectToNode(DiscoveryNode, ConnectionProfile, ConnectionValidator, ActionListener)
     */
    @Override
    public Transport.Connection getConnection(DiscoveryNode node) {
        Transport.Connection connection = connectedNodes.get(node);
        if (connection == null) {
            throw new NodeNotConnectedException(node, "Node not connected");
        }
        return connection;
    }

    /**
     * Returns {@code true} if the node is connected.
     */
    @Override
    public boolean nodeConnected(DiscoveryNode node) {
        return connectedNodes.containsKey(node);
    }

    /**
     * Disconnects from the given node; if not connected, this does nothing.
     */
    @Override
    public void disconnectFromNode(DiscoveryNode node) {
        Transport.Connection nodeChannels = connectedNodes.remove(node);
        if (nodeChannels != null) {
            // if we found it and removed it we close
            nodeChannels.close();
        }
    }

    /**
     * Returns the number of nodes this manager is connected to.
     */
    @Override
    public int size() {
        return connectedNodes.size();
    }

    @Override
    public Set<DiscoveryNode> getAllConnectedNodes() {
        return Collections.unmodifiableSet(connectedNodes.keySet());
    }

    /** Closes the manager, blocking until all pending connection attempts have drained. */
    @Override
    public void close() {
        internalClose(true);
    }

    /** Closes the manager without waiting for pending connection attempts. */
    @Override
    public void closeNoBlock() {
        internalClose(false);
    }

    private void internalClose(boolean waitForPendingConnections) {
        assert Transports.assertNotTransportThread("Closing ConnectionManager");
        if (closing.compareAndSet(false, true)) {
            // release the ref held since construction; closeInternal() runs once in-flight connects finish
            connectingRefCounter.decRef();
            if (waitForPendingConnections) {
                try {
                    closeLatch.await();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new IllegalStateException(e);
                }
            }
        }
    }

    private void internalOpenConnection(
        DiscoveryNode node,
        ConnectionProfile connectionProfile,
        ActionListener<Transport.Connection> listener
    ) {
        transport.openConnection(node, connectionProfile, ActionListener.map(listener, connection -> {
            assert Transports.assertNotTransportThread("internalOpenConnection success");
            try {
                connectionListener.onConnectionOpened(connection);
            } finally {
                connection.addCloseListener(ActionListener.wrap(() -> connectionListener.onConnectionClosed(connection)));
            }
            if (connection.isClosed()) {
                throw new ConnectTransportException(node, "a channel closed while connecting");
            }
            return connection;
        }));
    }

    // Fails the pending future for the node (if still registered) and releases the connect ref.
    private void failConnectionListeners(DiscoveryNode node, RunOnce releaseOnce, Exception e, ListenableFuture<Void> expectedListener) {
        ListenableFuture<Void> future = pendingConnections.remove(node);
        releaseOnce.run();
        if (future != null) {
            assert future == expectedListener : "Listener in pending map is different than the expected listener";
            future.onFailure(e);
        }
    }

    @Override
    public ConnectionProfile getConnectionProfile() {
        return defaultProfile;
    }

}
/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * The OpenSearch Contributors require contributions made to
 * this file be licensed under the Apache-2.0 license or a
 * compatible open source license.
 *
 * Modifications Copyright OpenSearch Contributors. See
 * GitHub history for details.
 */

package transportservice.transport;

import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.common.io.stream.StreamInput;
import org.opensearch.common.io.stream.StreamOutput;
import org.opensearch.transport.ActionTransportException;

import java.io.IOException;

/**
 * Exception raised when a transport-level connection to a cluster node fails
 * (the node may be {@code null}, e.g. when asked to connect to a null node).
 * Serializes the target node alongside the base exception state.
 */
public class ConnectTransportException extends ActionTransportException {

    // Node the failed connection attempt targeted; may be null.
    private final DiscoveryNode node;

    public ConnectTransportException(DiscoveryNode node, String msg) {
        this(node, msg, null, null);
    }

    public ConnectTransportException(DiscoveryNode node, String msg, String action) {
        this(node, msg, action, null);
    }

    public ConnectTransportException(DiscoveryNode node, String msg, Throwable cause) {
        this(node, msg, null, cause);
    }

    public ConnectTransportException(DiscoveryNode node, String msg, String action, Throwable cause) {
        // Guard both accessors against a null node before delegating.
        super(node == null ? null : node.getName(), node == null ? null : node.getAddress(), action, msg, cause);
        this.node = node;
    }

    /** Stream constructor: restores the optional node written by {@link #writeTo}. */
    public ConnectTransportException(StreamInput in) throws IOException {
        super(in);
        node = in.readOptionalWriteable(DiscoveryNode::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeOptionalWriteable(node);
    }

    /** @return the node the connection attempt targeted, or {@code null} if none was known */
    public DiscoveryNode node() {
        return node;
    }
}
package transportservice.transport;

import org.opensearch.cluster.node.DiscoveryNode;
import transportservice.action.ActionListener;

import java.io.Closeable;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;

/**
 * Manages the lifecycle of connections to other nodes: opening, registering,
 * looking up and closing them, and notifying {@link TransportConnectionListener}s
 * of connection events.
 */
public interface ConnectionManager extends Closeable {

    void addListener(TransportConnectionListener listener);

    void removeListener(TransportConnectionListener listener);

    /**
     * Opens a new connection without registering it with this manager;
     * the caller owns the returned connection's lifecycle.
     */
    void openConnection(DiscoveryNode node, ConnectionProfile connectionProfile, ActionListener<Transport.Connection> listener);

    /**
     * Connects to the node (no-op if already connected), validating the connection
     * with {@code connectionValidator} before exposing it.
     */
    void connectToNode(
        DiscoveryNode node,
        ConnectionProfile connectionProfile,
        ConnectionValidator connectionValidator,
        ActionListener<Void> listener
    ) throws ConnectTransportException;

    /**
     * Returns the registered connection for the node.
     * Callers must not close the returned connection.
     */
    Transport.Connection getConnection(DiscoveryNode node);

    /** Returns {@code true} if the node currently has a registered connection. */
    boolean nodeConnected(DiscoveryNode node);

    /** Disconnects from the node; does nothing if not connected. */
    void disconnectFromNode(DiscoveryNode node);

    Set<DiscoveryNode> getAllConnectedNodes();

    /** Number of nodes this manager is connected to. */
    int size();

    /** Closes the manager, blocking until pending connection attempts complete. */
    @Override
    void close();

    /** Closes the manager without waiting for pending connection attempts. */
    void closeNoBlock();

    ConnectionProfile getConnectionProfile();

    /** Validates a freshly opened connection before it is registered. */
    @FunctionalInterface
    interface ConnectionValidator {
        void validate(Transport.Connection connection, ConnectionProfile profile, ActionListener<Void> listener);
    }

    /**
     * Fan-out listener: forwards every connection event to all registered
     * {@link TransportConnectionListener}s (duplicate registrations are ignored).
     */
    final class DelegatingNodeConnectionListener implements TransportConnectionListener {

        private final CopyOnWriteArrayList<TransportConnectionListener> listeners = new CopyOnWriteArrayList<>();

        @Override
        public void onNodeDisconnected(DiscoveryNode key, Transport.Connection connection) {
            for (TransportConnectionListener listener : listeners) {
                listener.onNodeDisconnected(key, connection);
            }
        }

        @Override
        public void onNodeConnected(DiscoveryNode node, Transport.Connection connection) {
            for (TransportConnectionListener listener : listeners) {
                listener.onNodeConnected(node, connection);
            }
        }

        @Override
        public void onConnectionOpened(Transport.Connection connection) {
            for (TransportConnectionListener listener : listeners) {
                listener.onConnectionOpened(connection);
            }
        }

        @Override
        public void onConnectionClosed(Transport.Connection connection) {
            for (TransportConnectionListener listener : listeners) {
                listener.onConnectionClosed(connection);
            }
        }

        public void addListener(TransportConnectionListener listener) {
            listeners.addIfAbsent(listener);
        }

        public void removeListener(TransportConnectionListener listener) {
            listeners.remove(listener);
        }
    }
}
+ */ + +package transportservice.transport; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Nullable; +import org.opensearch.common.settings.Settings; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.transport.TransportRequestOptions; +import org.opensearch.transport.TransportSettings; + +import java.util.*; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * A connection profile describes how many connection are established to specific node for each of the available request types. + * ({@link TransportRequestOptions.Type}). This allows to tailor a connection towards a specific usage. + */ +public final class ConnectionProfile { + + /** + * takes a {@link ConnectionProfile} resolves it to a fully specified (i.e., no nulls) profile + */ + public static ConnectionProfile resolveConnectionProfile(@Nullable ConnectionProfile profile, ConnectionProfile fallbackProfile) { + Objects.requireNonNull(fallbackProfile); + if (profile == null) { + return fallbackProfile; + } else if (profile.getConnectTimeout() != null + && profile.getHandshakeTimeout() != null + && profile.getPingInterval() != null + && profile.getCompressionEnabled() != null) { + return profile; + } else { + Builder builder = new Builder(profile); + if (profile.getConnectTimeout() == null) { + builder.setConnectTimeout(fallbackProfile.getConnectTimeout()); + } + if (profile.getHandshakeTimeout() == null) { + builder.setHandshakeTimeout(fallbackProfile.getHandshakeTimeout()); + } + if (profile.getPingInterval() == null) { + builder.setPingInterval(fallbackProfile.getPingInterval()); + } + if (profile.getCompressionEnabled() == null) { + builder.setCompressionEnabled(fallbackProfile.getCompressionEnabled()); + } + return builder.build(); + } + } + + /** + * Builds a default connection profile based on the provided settings. 
+ * + * @param settings to build the connection profile from + * @return the connection profile + */ + public static ConnectionProfile buildDefaultConnectionProfile(Settings settings) { + int connectionsPerNodeRecovery = TransportSettings.CONNECTIONS_PER_NODE_RECOVERY.get(settings); + int connectionsPerNodeBulk = TransportSettings.CONNECTIONS_PER_NODE_BULK.get(settings); + int connectionsPerNodeReg = TransportSettings.CONNECTIONS_PER_NODE_REG.get(settings); + int connectionsPerNodeState = TransportSettings.CONNECTIONS_PER_NODE_STATE.get(settings); + int connectionsPerNodePing = TransportSettings.CONNECTIONS_PER_NODE_PING.get(settings); + Builder builder = new Builder(); + builder.setConnectTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); + builder.setHandshakeTimeout(TransportSettings.CONNECT_TIMEOUT.get(settings)); + builder.setPingInterval(TransportSettings.PING_SCHEDULE.get(settings)); + builder.setCompressionEnabled(TransportSettings.TRANSPORT_COMPRESS.get(settings)); + builder.addConnections(connectionsPerNodeBulk, TransportRequestOptions.Type.BULK); + builder.addConnections(connectionsPerNodePing, TransportRequestOptions.Type.PING); + // if we are not master eligible we don't need a dedicated channel to publish the state + builder.addConnections(DiscoveryNode.isMasterNode(settings) ? connectionsPerNodeState : 0, TransportRequestOptions.Type.STATE); + // if we are not a data-node we don't need any dedicated channels for recovery + builder.addConnections(DiscoveryNode.isDataNode(settings) ? connectionsPerNodeRecovery : 0, TransportRequestOptions.Type.RECOVERY); + builder.addConnections(connectionsPerNodeReg, TransportRequestOptions.Type.REG); + return builder.build(); + } + + /** + * Builds a connection profile that is dedicated to a single channel type. 
Use this + * when opening single use connections + */ + public static ConnectionProfile buildSingleChannelProfile(TransportRequestOptions.Type channelType) { + return buildSingleChannelProfile(channelType, null, null, null, null); + } + + /** + * Builds a connection profile that is dedicated to a single channel type. Allows passing connection and + * handshake timeouts and compression settings. + */ + public static ConnectionProfile buildSingleChannelProfile( + TransportRequestOptions.Type channelType, + @Nullable TimeValue connectTimeout, + @Nullable TimeValue handshakeTimeout, + @Nullable TimeValue pingInterval, + @Nullable Boolean compressionEnabled + ) { + Builder builder = new Builder(); + builder.addConnections(1, channelType); + final EnumSet otherTypes = EnumSet.allOf(TransportRequestOptions.Type.class); + otherTypes.remove(channelType); + builder.addConnections(0, otherTypes.toArray(new TransportRequestOptions.Type[0])); + if (connectTimeout != null) { + builder.setConnectTimeout(connectTimeout); + } + if (handshakeTimeout != null) { + builder.setHandshakeTimeout(handshakeTimeout); + } + if (pingInterval != null) { + builder.setPingInterval(pingInterval); + } + if (compressionEnabled != null) { + builder.setCompressionEnabled(compressionEnabled); + } + return builder.build(); + } + + private final List handles; + private final int numConnections; + private final TimeValue connectTimeout; + private final TimeValue handshakeTimeout; + private final TimeValue pingInterval; + private final Boolean compressionEnabled; + + private ConnectionProfile( + List handles, + int numConnections, + TimeValue connectTimeout, + TimeValue handshakeTimeout, + TimeValue pingInterval, + Boolean compressionEnabled + ) { + this.handles = handles; + this.numConnections = numConnections; + this.connectTimeout = connectTimeout; + this.handshakeTimeout = handshakeTimeout; + this.pingInterval = pingInterval; + this.compressionEnabled = compressionEnabled; + } + + /** + * A builder to 
build a new {@link ConnectionProfile} + */ + public static class Builder { + private final List handles = new ArrayList<>(); + private final Set addedTypes = EnumSet.noneOf(TransportRequestOptions.Type.class); + private int numConnections = 0; + private TimeValue connectTimeout; + private TimeValue handshakeTimeout; + private Boolean compressionEnabled; + private TimeValue pingInterval; + + /** create an empty builder */ + public Builder() {} + + /** copy constructor, using another profile as a base */ + public Builder(ConnectionProfile source) { + handles.addAll(source.getHandles()); + numConnections = source.getNumConnections(); + handles.forEach(th -> addedTypes.addAll(th.types)); + connectTimeout = source.getConnectTimeout(); + handshakeTimeout = source.getHandshakeTimeout(); + compressionEnabled = source.getCompressionEnabled(); + pingInterval = source.getPingInterval(); + } + + /** + * Sets a connect timeout for this connection profile + */ + public Builder setConnectTimeout(TimeValue connectTimeout) { + if (connectTimeout.millis() < 0) { + throw new IllegalArgumentException("connectTimeout must be non-negative but was: " + connectTimeout); + } + this.connectTimeout = connectTimeout; + return this; + } + + /** + * Sets a handshake timeout for this connection profile + */ + public Builder setHandshakeTimeout(TimeValue handshakeTimeout) { + if (handshakeTimeout.millis() < 0) { + throw new IllegalArgumentException("handshakeTimeout must be non-negative but was: " + handshakeTimeout); + } + this.handshakeTimeout = handshakeTimeout; + return this; + } + + /** + * Sets a ping interval for this connection profile + */ + public Builder setPingInterval(TimeValue pingInterval) { + this.pingInterval = pingInterval; + return this; + } + + /** + * Sets compression enabled for this connection profile + */ + public Builder setCompressionEnabled(boolean compressionEnabled) { + this.compressionEnabled = compressionEnabled; + return this; + } + + /** + * Adds a number of 
connections for one or more types. Each type can only be added once. + * @param numConnections the number of connections to use in the pool for the given connection types + * @param types a set of types that should share the given number of connections + */ + public Builder addConnections(int numConnections, TransportRequestOptions.Type... types) { + if (types == null || types.length == 0) { + throw new IllegalArgumentException("types must not be null"); + } + for (TransportRequestOptions.Type type : types) { + if (addedTypes.contains(type)) { + throw new IllegalArgumentException("type [" + type + "] is already registered"); + } + } + addedTypes.addAll(Arrays.asList(types)); + handles.add(new ConnectionTypeHandle(this.numConnections, numConnections, EnumSet.copyOf(Arrays.asList(types)))); + this.numConnections += numConnections; + return this; + } + + /** + * Creates a new {@link ConnectionProfile} based on the added connections. + * @throws IllegalStateException if any of the {@link TransportRequestOptions.Type} enum is missing + */ + public ConnectionProfile build() { + EnumSet types = EnumSet.allOf(TransportRequestOptions.Type.class); + types.removeAll(addedTypes); + if (types.isEmpty() == false) { + throw new IllegalStateException("not all types are added for this connection profile - missing types: " + types); + } + return new ConnectionProfile( + Collections.unmodifiableList(handles), + numConnections, + connectTimeout, + handshakeTimeout, + pingInterval, + compressionEnabled + ); + } + + } + + /** + * Returns the connect timeout or null if no explicit timeout is set on this profile. + */ + public TimeValue getConnectTimeout() { + return connectTimeout; + } + + /** + * Returns the handshake timeout or null if no explicit timeout is set on this profile. + */ + public TimeValue getHandshakeTimeout() { + return handshakeTimeout; + } + + /** + * Returns the ping interval or null if no explicit ping interval is set on this profile. 
+ */ + public TimeValue getPingInterval() { + return pingInterval; + } + + /** + * Returns boolean indicating if compression is enabled or null if no explicit compression + * is set on this profile. + */ + public Boolean getCompressionEnabled() { + return compressionEnabled; + } + + /** + * Returns the total number of connections for this profile + */ + public int getNumConnections() { + return numConnections; + } + + /** + * Returns the number of connections per type for this profile. This might return a count that is shared with other types such + * that the sum of all connections per type might be higher than {@link #getNumConnections()}. For instance if + * {@link TransportRequestOptions.Type#BULK} shares connections with + * {@link TransportRequestOptions.Type#REG} they will return both the same number of connections from + * this method but the connections are not distinct. + */ + public int getNumConnectionsPerType(TransportRequestOptions.Type type) { + for (ConnectionTypeHandle handle : handles) { + if (handle.getTypes().contains(type)) { + return handle.length; + } + } + throw new AssertionError("no handle found for type: " + type); + } + + /** + * Returns the type handles for this connection profile + */ + List getHandles() { + return Collections.unmodifiableList(handles); + } + + /** + * Connection type handle encapsulates the logic which connection + */ + static final class ConnectionTypeHandle { + public final int length; + public final int offset; + private final Set types; + private final AtomicInteger counter = new AtomicInteger(); + + private ConnectionTypeHandle(int offset, int length, Set types) { + this.length = length; + this.offset = offset; + this.types = types; + } + + /** + * Returns one of the channels out configured for this handle. The channel is selected in a round-robin + * fashion. 
+ */ + T getChannel(List channels) { + if (length == 0) { + throw new IllegalStateException("can't select channel size is 0 for types: " + types); + } + assert channels.size() >= offset + length : "illegal size: " + channels.size() + " expected >= " + (offset + length); + return channels.get(offset + Math.floorMod(counter.incrementAndGet(), length)); + } + + /** + * Returns all types for this handle + */ + Set getTypes() { + return types; + } + } +} diff --git a/src/main/java/transport/Header.java b/src/main/java/transportservice/transport/Header.java similarity index 99% rename from src/main/java/transport/Header.java rename to src/main/java/transportservice/transport/Header.java index 14b06eb..2262ea3 100644 --- a/src/main/java/transport/Header.java +++ b/src/main/java/transportservice/transport/Header.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package transport; +package transportservice.transport; import org.opensearch.Version; import org.opensearch.common.collect.Tuple; diff --git a/src/main/java/transportservice/transport/InboundAggregator.java b/src/main/java/transportservice/transport/InboundAggregator.java new file mode 100644 index 0000000..5baf99b --- /dev/null +++ b/src/main/java/transportservice/transport/InboundAggregator.java @@ -0,0 +1,263 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.breaker.CircuitBreakingException; +import org.opensearch.common.bytes.BytesArray; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.bytes.CompositeBytesReference; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.transport.*; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; + +public class InboundAggregator implements Releasable { + + private final Supplier circuitBreaker; + private final Predicate requestCanTripBreaker; + + private ReleasableBytesReference firstContent; + private ArrayList contentAggregation; + private Header currentHeader; + private Exception aggregationException; + private boolean canTripBreaker = true; + private boolean isClosed = false; + + public InboundAggregator( + Supplier circuitBreaker, + Function> registryFunction + ) { + this(circuitBreaker, (Predicate) actionName -> { + final RequestHandlerRegistry reg = registryFunction.apply(actionName); + if (reg == null) { + throw new ActionNotFoundTransportException(actionName); + } else { + return 
reg.canTripCircuitBreaker(); + } + }); + } + + // Visible for testing + InboundAggregator(Supplier circuitBreaker, Predicate requestCanTripBreaker) { + this.circuitBreaker = circuitBreaker; + this.requestCanTripBreaker = requestCanTripBreaker; + } + + public void headerReceived(Header header) { + ensureOpen(); + assert isAggregating() == false; + assert firstContent == null && contentAggregation == null; + currentHeader = header; + if (currentHeader.isRequest() && currentHeader.needsToReadVariableHeader() == false) { + initializeRequestState(); + } + } + + public void aggregate(ReleasableBytesReference content) { + ensureOpen(); + assert isAggregating(); + if (isShortCircuited() == false) { + if (isFirstContent()) { + firstContent = content.retain(); + } else { + if (contentAggregation == null) { + contentAggregation = new ArrayList<>(4); + assert firstContent != null; + contentAggregation.add(firstContent); + firstContent = null; + } + contentAggregation.add(content.retain()); + } + } + } + + public InboundMessage finishAggregation() throws IOException { + ensureOpen(); + final ReleasableBytesReference releasableContent; + if (isFirstContent()) { + releasableContent = ReleasableBytesReference.wrap(BytesArray.EMPTY); + } else if (contentAggregation == null) { + releasableContent = firstContent; + } else { + final ReleasableBytesReference[] references = contentAggregation.toArray(new ReleasableBytesReference[0]); + final BytesReference content = CompositeBytesReference.of(references); + releasableContent = new ReleasableBytesReference(content, () -> Releasables.close(references)); + } + + final BreakerControl breakerControl = new BreakerControl(circuitBreaker); + final InboundMessage aggregated = new InboundMessage(currentHeader, releasableContent, breakerControl); + boolean success = false; + try { + if (aggregated.getHeader().needsToReadVariableHeader()) { + aggregated.getHeader().finishParsingHeader(aggregated.openOrGetStreamInput()); + if 
(aggregated.getHeader().isRequest()) { + initializeRequestState(); + } + } + if (isShortCircuited() == false) { + checkBreaker(aggregated.getHeader(), aggregated.getContentLength(), breakerControl); + } + if (isShortCircuited()) { + aggregated.close(); + success = true; + return new InboundMessage(aggregated.getHeader(), aggregationException); + } else { + success = true; + return aggregated; + } + } finally { + resetCurrentAggregation(); + if (success == false) { + aggregated.close(); + } + } + } + + public boolean isAggregating() { + return currentHeader != null; + } + + private void shortCircuit(Exception exception) { + this.aggregationException = exception; + } + + private boolean isShortCircuited() { + return aggregationException != null; + } + + private boolean isFirstContent() { + return firstContent == null && contentAggregation == null; + } + + @Override + public void close() { + isClosed = true; + closeCurrentAggregation(); + } + + private void closeCurrentAggregation() { + releaseContent(); + resetCurrentAggregation(); + } + + private void releaseContent() { + if (contentAggregation == null) { + Releasables.close(firstContent); + } else { + Releasables.close(contentAggregation); + } + } + + private void resetCurrentAggregation() { + firstContent = null; + contentAggregation = null; + currentHeader = null; + aggregationException = null; + canTripBreaker = true; + } + + private void ensureOpen() { + if (isClosed) { + throw new IllegalStateException("Aggregator is already closed"); + } + } + + private void initializeRequestState() { + assert currentHeader.needsToReadVariableHeader() == false; + assert currentHeader.isRequest(); + if (currentHeader.isHandshake()) { + canTripBreaker = false; + return; + } + + final String actionName = currentHeader.getActionName(); + try { + canTripBreaker = requestCanTripBreaker.test(actionName); + } catch (ActionNotFoundTransportException e) { + shortCircuit(e); + } + } + + private void checkBreaker(final Header header, 
final int contentLength, final BreakerControl breakerControl) { + if (header.isRequest() == false) { + return; + } + assert header.needsToReadVariableHeader() == false; + + if (canTripBreaker) { + try { + circuitBreaker.get().addEstimateBytesAndMaybeBreak(contentLength, header.getActionName()); + breakerControl.setReservedBytes(contentLength); + } catch (CircuitBreakingException e) { + shortCircuit(e); + } + } else { + circuitBreaker.get().addWithoutBreaking(contentLength); + breakerControl.setReservedBytes(contentLength); + } + } + + private static class BreakerControl implements Releasable { + + private static final int CLOSED = -1; + + private final Supplier circuitBreaker; + private final AtomicInteger bytesToRelease = new AtomicInteger(0); + + private BreakerControl(Supplier circuitBreaker) { + this.circuitBreaker = circuitBreaker; + } + + private void setReservedBytes(int reservedBytes) { + final boolean set = bytesToRelease.compareAndSet(0, reservedBytes); + assert set : "Expected bytesToRelease to be 0, found " + bytesToRelease.get(); + } + + @Override + public void close() { + final int toRelease = bytesToRelease.getAndSet(CLOSED); + assert toRelease != CLOSED; + if (toRelease > 0) { + circuitBreaker.get().addWithoutBreaking(-toRelease); + } + } + } +} diff --git a/src/main/java/transportservice/transport/InboundDecoder.java b/src/main/java/transportservice/transport/InboundDecoder.java new file mode 100644 index 0000000..c8454e5 --- /dev/null +++ b/src/main/java/transportservice/transport/InboundDecoder.java @@ -0,0 +1,233 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.Version; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.transport.TcpHeader; +import org.opensearch.transport.TransportDecompressor; +import transportservice.TcpTransport; + +import java.io.IOException; +import java.util.function.Consumer; + +public class InboundDecoder implements Releasable { + + static final Object PING = new Object(); + static final Object END_CONTENT = new Object(); + + private final Version version; + private final PageCacheRecycler recycler; + private TransportDecompressor decompressor; + private int totalNetworkSize = -1; + private int bytesConsumed = 0; + private boolean isClosed = false; + + public InboundDecoder(Version version, PageCacheRecycler recycler) { + this.version = version; + this.recycler = recycler; + } + + public int decode(ReleasableBytesReference reference, Consumer fragmentConsumer) throws IOException { + ensureOpen(); + try { + return internalDecode(reference, fragmentConsumer); + } 
catch (Exception e) { + cleanDecodeState(); + throw e; + } + } + + public int internalDecode(ReleasableBytesReference reference, Consumer fragmentConsumer) throws IOException { + if (isOnHeader()) { + int messageLength = TcpTransport.readMessageLength(reference); + if (messageLength == -1) { + return 0; + } else if (messageLength == 0) { + fragmentConsumer.accept(PING); + return 6; + } else { + int headerBytesToRead = headerBytesToRead(reference); + if (headerBytesToRead == 0) { + return 0; + } else { + totalNetworkSize = messageLength + TcpHeader.BYTES_REQUIRED_FOR_MESSAGE_SIZE; + + Header header = readHeader(version, messageLength, reference); + bytesConsumed += headerBytesToRead; + if (header.isCompressed()) { + decompressor = new TransportDecompressor(recycler); + } + fragmentConsumer.accept(header); + + if (isDone()) { + finishMessage(fragmentConsumer); + } + return headerBytesToRead; + } + } + } else { + // There are a minimum number of bytes required to start decompression + if (decompressor != null && decompressor.canDecompress(reference.length()) == false) { + return 0; + } + int bytesToConsume = Math.min(reference.length(), totalNetworkSize - bytesConsumed); + bytesConsumed += bytesToConsume; + ReleasableBytesReference retainedContent; + if (isDone()) { + retainedContent = reference.retainedSlice(0, bytesToConsume); + } else { + retainedContent = reference.retain(); + } + if (decompressor != null) { + decompress(retainedContent); + ReleasableBytesReference decompressed; + while ((decompressed = decompressor.pollDecompressedPage()) != null) { + fragmentConsumer.accept(decompressed); + } + } else { + fragmentConsumer.accept(retainedContent); + } + if (isDone()) { + finishMessage(fragmentConsumer); + } + + return bytesToConsume; + } + } + + @Override + public void close() { + isClosed = true; + cleanDecodeState(); + } + + private void finishMessage(Consumer fragmentConsumer) { + cleanDecodeState(); + fragmentConsumer.accept(END_CONTENT); + } + + private void 
cleanDecodeState() { + IOUtils.closeWhileHandlingException(decompressor); + decompressor = null; + totalNetworkSize = -1; + bytesConsumed = 0; + } + + private void decompress(ReleasableBytesReference content) throws IOException { + try (ReleasableBytesReference toRelease = content) { + int consumed = decompressor.decompress(content); + assert consumed == content.length(); + } + } + + private boolean isDone() { + return bytesConsumed == totalNetworkSize; + } + + private int headerBytesToRead(BytesReference reference) { + System.out.println("REFERENCE LENGTH " + reference.length() + " " + reference.utf8ToString()); + if (reference.length() < TcpHeader.BYTES_REQUIRED_FOR_VERSION) { + return 0; + } + + Version remoteVersion = Version.fromId(reference.getInt(TcpHeader.VERSION_POSITION)); + int fixedHeaderSize = TcpHeader.headerSize(remoteVersion); + if (fixedHeaderSize > reference.length()) { + return 0; + } else if (remoteVersion.before(TcpHeader.VERSION_WITH_HEADER_SIZE)) { + return fixedHeaderSize; + } else { + int variableHeaderSize = reference.getInt(TcpHeader.VARIABLE_HEADER_SIZE_POSITION); + System.out.println("VARIABLE SIZE " + variableHeaderSize); + int totalHeaderSize = fixedHeaderSize + variableHeaderSize; + if (totalHeaderSize > reference.length()) { + return 0; + } else { + return totalHeaderSize; + } + } + } + + // exposed for use in tests + static Header readHeader(Version version, int networkMessageSize, BytesReference bytesReference) throws IOException { + try (StreamInput streamInput = bytesReference.streamInput()) { + streamInput.skip(TcpHeader.BYTES_REQUIRED_FOR_MESSAGE_SIZE); + long requestId = streamInput.readLong(); + byte status = streamInput.readByte(); + Version remoteVersion = Version.fromId(streamInput.readInt()); + Header header = new Header(networkMessageSize, requestId, status, remoteVersion); + final IllegalStateException invalidVersion = ensureVersionCompatibility(remoteVersion, version, header.isHandshake()); + if (invalidVersion != 
null) { + throw invalidVersion; + } else { + if (remoteVersion.onOrAfter(TcpHeader.VERSION_WITH_HEADER_SIZE)) { + // Skip since we already have ensured enough data available + streamInput.readInt(); + header.finishParsingHeader(streamInput); + } + } + return header; + } + } + + private boolean isOnHeader() { + return totalNetworkSize == -1; + } + + private void ensureOpen() { + if (isClosed) { + throw new IllegalStateException("Decoder is already closed"); + } + } + + static IllegalStateException ensureVersionCompatibility(Version remoteVersion, Version currentVersion, boolean isHandshake) { + // for handshakes we are compatible with N-2 since otherwise we can't figure out our initial version + // since we are compatible with N-1 and N+1 so we always send our minCompatVersion as the initial version in the + // handshake. This looks odd but it's required to establish the connection correctly we check for real compatibility + // once the connection is established + final Version compatibilityVersion = isHandshake ? currentVersion.minimumCompatibilityVersion() : currentVersion; + if ((currentVersion.equals(Version.V_2_0_0) && remoteVersion.equals(Version.fromId(6079999))) == false + && remoteVersion.isCompatible(compatibilityVersion) == false) { + final Version minCompatibilityVersion = isHandshake ? compatibilityVersion : compatibilityVersion.minimumCompatibilityVersion(); + String msg = "Received " + (isHandshake ? 
"handshake " : "") + "message from unsupported version: ["; + return new IllegalStateException(msg + remoteVersion + "] minimal compatible version is: [" + minCompatibilityVersion + "]"); + } + return null; + } +} diff --git a/src/main/java/transportservice/transport/InboundHandler.java b/src/main/java/transportservice/transport/InboundHandler.java new file mode 100644 index 0000000..df3c1ae --- /dev/null +++ b/src/main/java/transportservice/transport/InboundHandler.java @@ -0,0 +1,459 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.util.BytesRef; +import org.opensearch.Version; +import org.opensearch.common.io.stream.ByteBufferStreamInput; +import org.opensearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.opensearch.common.io.stream.NamedWriteableRegistry; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractRunnable; +import org.opensearch.common.util.concurrent.ThreadContext; +import org.opensearch.threadpool.ThreadPool; + + +import org.opensearch.transport.*; + +import java.io.EOFException; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.ByteBuffer; + +public class InboundHandler { + + private static final Logger logger = LogManager.getLogger(InboundHandler.class); + + private final ThreadPool threadPool; + private final OutboundHandler outboundHandler; + private final NamedWriteableRegistry namedWriteableRegistry; + private final TransportHandshaker handshaker; + private final TransportKeepAlive keepAlive; + private final Transport.ResponseHandlers responseHandlers; + private final Transport.RequestHandlers requestHandlers; + + private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; + + private volatile long slowLogThresholdMs = Long.MAX_VALUE; + + public InboundHandler( + ThreadPool threadPool, + OutboundHandler outboundHandler, + NamedWriteableRegistry namedWriteableRegistry, + TransportHandshaker handshaker, + TransportKeepAlive keepAlive, + Transport.RequestHandlers requestHandlers, + Transport.ResponseHandlers responseHandlers + ) { + this.threadPool = threadPool; + this.outboundHandler = outboundHandler; + 
this.namedWriteableRegistry = namedWriteableRegistry; + this.handshaker = handshaker; + this.keepAlive = keepAlive; + this.requestHandlers = requestHandlers; + this.responseHandlers = responseHandlers; + } + + public void setMessageListener(TransportMessageListener listener) { + if (messageListener == TransportMessageListener.NOOP_LISTENER) { + messageListener = listener; + } else { + throw new IllegalStateException("Cannot set message listener twice"); + } + } + + public void setSlowLogThreshold(TimeValue slowLogThreshold) { + this.slowLogThresholdMs = slowLogThreshold.getMillis(); + } + + public void inboundMessage(TcpChannel channel, InboundMessage message) throws Exception { + final long startTime = threadPool.relativeTimeInMillis(); + channel.getChannelStats().markAccessed(startTime); + TransportLogger.logInboundMessage(channel, message); + + if (message.isPing()) { + keepAlive.receiveKeepAlive(channel); + } else { + messageReceived(channel, message, startTime); + } + } + + // Empty stream constant to avoid instantiating a new stream for empty messages. 
+ private static final StreamInput EMPTY_STREAM_INPUT = new ByteBufferStreamInput(ByteBuffer.wrap(BytesRef.EMPTY_BYTES)); + + private void messageReceived(TcpChannel channel, InboundMessage message, long startTime) throws IOException { + final InetSocketAddress remoteAddress = channel.getRemoteAddress(); + final Header header = message.getHeader(); + assert header.needsToReadVariableHeader() == false; + + ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext existing = threadContext.stashContext()) { + // Place the context with the headers from the message + threadContext.setHeaders(header.getHeaders()); + threadContext.putTransient("_remote_address", remoteAddress); + if (header.isRequest()) { + handleRequest(channel, header, message); + } else { + // Responses do not support short circuiting currently + assert message.isShortCircuit() == false; + final TransportResponseHandler handler; + long requestId = header.getRequestId(); + if (header.isHandshake()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + TransportResponseHandler theHandler = responseHandlers.onResponseReceived( + requestId, + messageListener + ); + if (theHandler == null && header.isError()) { + handler = handshaker.removeHandlerForHandshake(requestId); + } else { + handler = theHandler; + } + } + // ignore if its null, the service logs it + if (handler != null) { + final StreamInput streamInput; + if (message.getContentLength() > 0 || header.getVersion().equals(Version.CURRENT) == false) { + streamInput = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(streamInput, header.getVersion()); + if (header.isError()) { + handlerResponseError(requestId, streamInput, handler); + } else { + handleResponse(requestId, remoteAddress, streamInput, handler); + } + } else { + assert header.isError() == false; + handleResponse(requestId, remoteAddress, EMPTY_STREAM_INPUT, handler); + } + } + } + } finally { + final 
long took = threadPool.relativeTimeInMillis() - startTime; + final long logThreshold = slowLogThresholdMs; + if (logThreshold > 0 && took > logThreshold) { + logger.warn( + "handling inbound transport message [{}] took [{}ms] which is above the warn threshold of [{}ms]", + message, + took, + logThreshold + ); + } + } + } + + private void handleRequest(TcpChannel channel, Header header, InboundMessage message) throws IOException { + final String action = header.getActionName(); + final long requestId = header.getRequestId(); + final Version version = header.getVersion(); + if (header.isHandshake()) { + messageListener.onRequestReceived(requestId, action); + // Cannot short circuit handshakes + assert message.isShortCircuit() == false; + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final TransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + null, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + try { + handshaker.handleHandshake(transportChannel, requestId, stream); + } catch (Exception e) { + if (Version.CURRENT.isCompatible(header.getVersion())) { + sendErrorResponse(action, transportChannel, e); + } else { + logger.warn( + new ParameterizedMessage( + "could not send error response to handshake received on [{}] using wire format version [{}], closing channel", + channel, + header.getVersion() + ), + e + ); + channel.close(); + } + } + } else { + final TransportChannel transportChannel = new TcpTransportChannel( + outboundHandler, + null, + action, + requestId, + version, + header.getFeatures(), + header.isCompressed(), + header.isHandshake(), + message.takeBreakerReleaseControl() + ); + try { + messageListener.onRequestReceived(requestId, action); + if (message.isShortCircuit()) { + sendErrorResponse(action, transportChannel, 
message.getException()); + } else { + final StreamInput stream = namedWriteableStream(message.openOrGetStreamInput()); + assertRemoteVersion(stream, header.getVersion()); + final RequestHandlerRegistry reg = requestHandlers.getHandler(action); + assert reg != null; + + final T request = newRequest(requestId, action, stream, reg); + request.remoteAddress(new TransportAddress(channel.getRemoteAddress())); + checkStreamIsFullyConsumed(requestId, action, stream); + + final String executor = reg.getExecutor(); + if (ThreadPool.Names.SAME.equals(executor)) { + try { + reg.processMessageReceived(request, transportChannel); + } catch (Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } + } else { + threadPool.executor(executor).execute(new RequestHandler<>(reg, request, transportChannel)); + } + } + } catch (Exception e) { + sendErrorResponse(action, transportChannel, e); + } + } + } + + /** + * Creates new request instance out of input stream. Throws IllegalStateException if the end of + * the stream was reached before the request is fully deserialized from the stream. + * @param transport request type + * @param requestId request identifier + * @param action action name + * @param stream stream + * @param reg request handler registry + * @return new request instance + * @throws IOException IOException + * @throws IllegalStateException IllegalStateException + */ + private T newRequest( + final long requestId, + final String action, + final StreamInput stream, + final RequestHandlerRegistry reg + ) throws IOException { + try { + return reg.newRequest(stream); + } catch (final EOFException e) { + // Another favor of (de)serialization issues is when stream contains less bytes than + // the request handler needs to deserialize the payload. 
+ throw new IllegalStateException( + "Message fully read (request) but more data is expected for requestId [" + + requestId + + "], action [" + + action + + "]; resetting", + e + ); + } + } + + /** + * Checks if the stream is fully consumed and throws the exceptions if that is not the case. + * @param requestId request identifier + * @param action action name + * @param stream stream + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed(final long requestId, final String action, final StreamInput stream) throws IOException { + // in case we throw an exception, i.e. when the limit is hit, we don't want to verify + final int nextByte = stream.read(); + + // calling read() is useful to make sure the message is fully read, even if there some kind of EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (request) for requestId [" + + requestId + + "], action [" + + action + + "], available [" + + stream.available() + + "]; resetting" + ); + } + } + + /** + * Checks if the stream is fully consumed and throws the exceptions if that is not the case. 
+ * @param requestId request identifier + * @param handler response handler + * @param stream stream + * @param error "true" if response represents error, "false" otherwise + * @throws IOException IOException + */ + private void checkStreamIsFullyConsumed( + final long requestId, + final TransportResponseHandler handler, + final StreamInput stream, + final boolean error + ) throws IOException { + if (stream != EMPTY_STREAM_INPUT) { + // Check the entire message has been read + final int nextByte = stream.read(); + // calling read() is useful to make sure the message is fully read, even if there is an EOS marker + if (nextByte != -1) { + throw new IllegalStateException( + "Message not fully read (response) for requestId [" + + requestId + + "], handler [" + + handler + + "], error [" + + error + + "]; resetting" + ); + } + } + } + + private static void sendErrorResponse(String actionName, TransportChannel transportChannel, Exception e) { + try { + transportChannel.sendResponse(e); + } catch (Exception inner) { + inner.addSuppressed(e); + logger.warn(() -> new ParameterizedMessage("Failed to send error message back to client for action [{}]", actionName), inner); + } + } + + private void handleResponse( + final long requestId, + InetSocketAddress remoteAddress, + final StreamInput stream, + final TransportResponseHandler handler + ) { + final T response; + try { + response = handler.read(stream); + response.remoteAddress(new TransportAddress(remoteAddress)); + checkStreamIsFullyConsumed(requestId, handler, stream, false); + } catch (Exception e) { + final Exception serializationException = new TransportSerializationException( + "Failed to deserialize response from handler [" + handler + "]", + e + ); + logger.warn(new ParameterizedMessage("Failed to deserialize response from [{}]", remoteAddress), serializationException); + handleException(handler, serializationException); + return; + } + final String executor = handler.executor(); + if 
(ThreadPool.Names.SAME.equals(executor)) { + doHandleResponse(handler, response); + } else { + threadPool.executor(executor).execute(() -> doHandleResponse(handler, response)); + } + } + + private void doHandleResponse(TransportResponseHandler handler, T response) { + try { + handler.handleResponse(response); + } catch (Exception e) { + handleException(handler, new ResponseHandlerFailureTransportException(e)); + } + } + + private void handlerResponseError(final long requestId, StreamInput stream, final TransportResponseHandler handler) { + Exception error; + try { + error = stream.readException(); + checkStreamIsFullyConsumed(requestId, handler, stream, true); + } catch (Exception e) { + error = new TransportSerializationException( + "Failed to deserialize exception response from stream for handler [" + handler + "]", + e + ); + } + handleException(handler, error); + } + + private void handleException(final TransportResponseHandler handler, Throwable error) { + if (!(error instanceof RemoteTransportException)) { + error = new RemoteTransportException(error.getMessage(), error); + } + final RemoteTransportException rtx = (RemoteTransportException) error; + threadPool.executor(handler.executor()).execute(() -> { + try { + handler.handleException(rtx); + } catch (Exception e) { + logger.error(() -> new ParameterizedMessage("failed to handle exception response [{}]", handler), e); + } + }); + } + + private StreamInput namedWriteableStream(StreamInput delegate) { + return new NamedWriteableAwareStreamInput(delegate, namedWriteableRegistry); + } + + static void assertRemoteVersion(StreamInput in, Version version) { + assert version.equals(in.getVersion()) : "Stream version [" + in.getVersion() + "] does not match version [" + version + "]"; + } + + private static class RequestHandler extends AbstractRunnable { + private final RequestHandlerRegistry reg; + private final T request; + private final TransportChannel transportChannel; + + RequestHandler(RequestHandlerRegistry 
reg, T request, TransportChannel transportChannel) { + this.reg = reg; + this.request = request; + this.transportChannel = transportChannel; + } + + @Override + protected void doRun() throws Exception { + reg.processMessageReceived(request, transportChannel); + } + + @Override + public boolean isForceExecution() { + return reg.isForceExecution(); + } + + @Override + public void onFailure(Exception e) { + sendErrorResponse(reg.getAction(), transportChannel, e); + } + } +} diff --git a/src/main/java/transport/InboundMessage.java b/src/main/java/transportservice/transport/InboundMessage.java similarity index 99% rename from src/main/java/transport/InboundMessage.java rename to src/main/java/transportservice/transport/InboundMessage.java index 7bb6d4e..9f959f4 100644 --- a/src/main/java/transport/InboundMessage.java +++ b/src/main/java/transportservice/transport/InboundMessage.java @@ -30,7 +30,7 @@ * GitHub history for details. */ -package transport; +package transportservice.transport; import org.opensearch.common.bytes.ReleasableBytesReference; import org.opensearch.common.io.stream.StreamInput; diff --git a/src/main/java/transportservice/transport/InboundPipeline.java b/src/main/java/transportservice/transport/InboundPipeline.java new file mode 100644 index 0000000..359436c --- /dev/null +++ b/src/main/java/transportservice/transport/InboundPipeline.java @@ -0,0 +1,214 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.Version; +import org.opensearch.common.breaker.CircuitBreaker; +import org.opensearch.common.bytes.CompositeBytesReference; +import org.opensearch.common.bytes.ReleasableBytesReference; +import org.opensearch.common.lease.Releasable; +import org.opensearch.common.lease.Releasables; +import org.opensearch.common.util.PageCacheRecycler; +import org.opensearch.transport.*; + +import java.io.IOException; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.function.LongSupplier; +import java.util.function.Supplier; + +public class InboundPipeline implements Releasable { + + private static final ThreadLocal> fragmentList = ThreadLocal.withInitial(ArrayList::new); + private static final InboundMessage PING_MESSAGE = new InboundMessage(null, true); + + private final LongSupplier relativeTimeInMillis; + private final StatsTracker statsTracker; + private final InboundDecoder decoder; + private final InboundAggregator aggregator; + private final BiConsumer messageHandler; + private Exception uncaughtException; + private final ArrayDeque pending = new ArrayDeque<>(2); + private boolean isClosed = false; + + public InboundPipeline( + Version version, + StatsTracker statsTracker, + PageCacheRecycler recycler, + LongSupplier relativeTimeInMillis, + Supplier circuitBreaker, + Function> 
registryFunction, + BiConsumer messageHandler + ) { + this( + statsTracker, + relativeTimeInMillis, + new InboundDecoder(version, recycler), + new InboundAggregator(circuitBreaker, registryFunction), + messageHandler + ); + } + + public InboundPipeline( + StatsTracker statsTracker, + LongSupplier relativeTimeInMillis, + InboundDecoder decoder, + InboundAggregator aggregator, + BiConsumer messageHandler + ) { + this.relativeTimeInMillis = relativeTimeInMillis; + this.statsTracker = statsTracker; + this.decoder = decoder; + this.aggregator = aggregator; + this.messageHandler = messageHandler; + } + + @Override + public void close() { + isClosed = true; + Releasables.closeWhileHandlingException(decoder, aggregator); + Releasables.closeWhileHandlingException(pending); + pending.clear(); + } + + public void handleBytes(TcpChannel channel, ReleasableBytesReference reference) throws IOException { + if (uncaughtException != null) { + throw new IllegalStateException("Pipeline state corrupted by uncaught exception", uncaughtException); + } + try { + doHandleBytes(channel, reference); + } catch (Exception e) { + uncaughtException = e; + throw e; + } + } + + public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference) throws IOException { + channel.getChannelStats().markAccessed(relativeTimeInMillis.getAsLong()); + statsTracker.markBytesRead(reference.length()); + pending.add(reference.retain()); + + final ArrayList fragments = fragmentList.get(); + boolean continueHandling = true; + + while (continueHandling && isClosed == false) { + boolean continueDecoding = true; + while (continueDecoding && pending.isEmpty() == false) { + try (ReleasableBytesReference toDecode = getPendingBytes()) { + final int bytesDecoded = decoder.decode(toDecode, fragments::add); + if (bytesDecoded != 0) { + releasePendingBytes(bytesDecoded); + if (fragments.isEmpty() == false && endOfMessage(fragments.get(fragments.size() - 1))) { + continueDecoding = false; + } + } else { + 
continueDecoding = false; + } + } + } + + if (fragments.isEmpty()) { + continueHandling = false; + } else { + try { + forwardFragments(channel, fragments); + } finally { + for (Object fragment : fragments) { + if (fragment instanceof ReleasableBytesReference) { + ((ReleasableBytesReference) fragment).close(); + } + } + fragments.clear(); + } + } + } + } + + private void forwardFragments(TcpChannel channel, ArrayList fragments) throws IOException { + for (Object fragment : fragments) { + if (fragment instanceof Header) { + assert aggregator.isAggregating() == false; + aggregator.headerReceived((Header) fragment); + } else if (fragment == InboundDecoder.PING) { + assert aggregator.isAggregating() == false; + messageHandler.accept(channel, PING_MESSAGE); + } else if (fragment == InboundDecoder.END_CONTENT) { + assert aggregator.isAggregating(); + try (InboundMessage aggregated = aggregator.finishAggregation()) { + statsTracker.markMessageReceived(); + messageHandler.accept(channel, aggregated); + } + } else { + assert aggregator.isAggregating(); + assert fragment instanceof ReleasableBytesReference; + aggregator.aggregate((ReleasableBytesReference) fragment); + } + } + } + + private boolean endOfMessage(Object fragment) { + return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; + } + + private ReleasableBytesReference getPendingBytes() { + if (pending.size() == 1) { + return pending.peekFirst().retain(); + } else { + final ReleasableBytesReference[] bytesReferences = new ReleasableBytesReference[pending.size()]; + int index = 0; + for (ReleasableBytesReference pendingReference : pending) { + bytesReferences[index] = pendingReference.retain(); + ++index; + } + final Releasable releasable = () -> Releasables.closeWhileHandlingException(bytesReferences); + return new ReleasableBytesReference(CompositeBytesReference.of(bytesReferences), releasable); + } + } + + private void releasePendingBytes(int bytesConsumed) 
{ + int bytesToRelease = bytesConsumed; + while (bytesToRelease != 0) { + try (ReleasableBytesReference reference = pending.pollFirst()) { + assert reference != null; + if (bytesToRelease < reference.length()) { + pending.addFirst(reference.retainedSlice(bytesToRelease, reference.length() - bytesToRelease)); + bytesToRelease -= bytesToRelease; + } else { + bytesToRelease -= reference.length(); + } + } + } + } +} diff --git a/src/main/java/transport/NetworkMessage.java b/src/main/java/transportservice/transport/NetworkMessage.java similarity index 93% rename from src/main/java/transport/NetworkMessage.java rename to src/main/java/transportservice/transport/NetworkMessage.java index e8d7310..0cc169d 100644 --- a/src/main/java/transport/NetworkMessage.java +++ b/src/main/java/transportservice/transport/NetworkMessage.java @@ -29,14 +29,14 @@ * GitHub history for details. */ -package transport; +package transportservice.transport; import org.opensearch.Version; import org.opensearch.common.io.stream.Writeable; import org.opensearch.common.util.concurrent.ThreadContext; /** - * Represents a transport message sent over the network. Subclasses implement serialization and + * Represents a transportservice.transport message sent over the network. Subclasses implement serialization and * deserialization. 
*/ public abstract class NetworkMessage { diff --git a/src/main/java/transport/OutboundHandler.java b/src/main/java/transportservice/transport/OutboundHandler.java similarity index 97% rename from src/main/java/transport/OutboundHandler.java rename to src/main/java/transportservice/transport/OutboundHandler.java index 522f680..f47723f 100644 --- a/src/main/java/transport/OutboundHandler.java +++ b/src/main/java/transportservice/transport/OutboundHandler.java @@ -1,4 +1,4 @@ -package transport;/* +package transportservice.transport;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to @@ -68,7 +68,7 @@ public final class OutboundHandler { private final BigArrays bigArrays; private volatile TransportMessageListener messageListener = TransportMessageListener.NOOP_LISTENER; - OutboundHandler( + public OutboundHandler( String nodeName, Version version, String[] features, @@ -84,7 +84,7 @@ public final class OutboundHandler { this.bigArrays = bigArrays; } - void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener listener) { + public void sendBytes(TcpChannel channel, BytesReference bytes, ActionListener listener) { SendContext sendContext = new SendContext(channel, () -> bytes, listener); try { internalSend(channel, sendContext); @@ -110,7 +110,7 @@ public void sendRequest( final boolean isHandshake ) throws IOException, TransportException { Version version = Version.min(this.version, channelVersion); - OutboundMessage.Request message = new OutboundMessage.Request( + OutboundMessage.Request message = new transportservice.transport.OutboundMessage.Request( threadPool.getThreadContext(), features, request, diff --git a/src/main/java/transport/OutboundMessage.java b/src/main/java/transportservice/transport/OutboundMessage.java similarity index 83% rename from src/main/java/transport/OutboundMessage.java rename to src/main/java/transportservice/transport/OutboundMessage.java index 27f22ae..39e4912 100644 --- 
a/src/main/java/transport/OutboundMessage.java +++ b/src/main/java/transportservice/transport/OutboundMessage.java @@ -1,4 +1,4 @@ -package transport;/* +package transportservice.transport;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made to @@ -43,7 +43,7 @@ import java.io.IOException; import java.util.Set; -abstract class OutboundMessage extends NetworkMessage { +abstract class OutboundMessage extends transportservice.transport.NetworkMessage { private final Writeable message; @@ -66,7 +66,7 @@ BytesReference serialize(BytesStreamOutput bytesStream) throws IOException { variableHeaderLength = Math.toIntExact(bytesStream.position() - preHeaderPosition); } - try (CompressibleBytesOutputStream stream = new CompressibleBytesOutputStream(bytesStream, TransportStatus.isCompress(status))) { + try (transportservice.transport.CompressibleBytesOutputStream stream = new transportservice.transport.CompressibleBytesOutputStream(bytesStream, transportservice.transport.TransportStatus.isCompress(status))) { stream.setVersion(version); stream.setFeatures(bytesStream.getFeatures()); @@ -86,10 +86,10 @@ protected void writeVariableHeader(StreamOutput stream) throws IOException { threadContext.writeTo(stream); } - protected BytesReference writeMessage(CompressibleBytesOutputStream stream) throws IOException { + protected BytesReference writeMessage(transportservice.transport.CompressibleBytesOutputStream stream) throws IOException { final BytesReference zeroCopyBuffer; - if (message instanceof BytesTransportRequest) { - BytesTransportRequest bRequest = (BytesTransportRequest) message; + if (message instanceof transportservice.transport.BytesTransportRequest) { + transportservice.transport.BytesTransportRequest bRequest = (transportservice.transport.BytesTransportRequest) message; bRequest.writeThin(stream); zeroCopyBuffer = bRequest.bytes; } else if (message instanceof RemoteTransportException) { @@ -141,12 +141,12 @@ protected void 
writeVariableHeader(StreamOutput stream) throws IOException { private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { byte status = 0; - status = TransportStatus.setRequest(status); + status = transportservice.transport.TransportStatus.setRequest(status); if (compress && OutboundMessage.canCompress(message)) { - status = TransportStatus.setCompress(status); + status = transportservice.transport.TransportStatus.setCompress(status); } if (isHandshake) { - status = TransportStatus.setHandshake(status); + status = transportservice.transport.TransportStatus.setHandshake(status); } return status; @@ -178,15 +178,15 @@ protected void writeVariableHeader(StreamOutput stream) throws IOException { private static byte setStatus(boolean compress, boolean isHandshake, Writeable message) { byte status = 0; - status = TransportStatus.setResponse(status); + status = transportservice.transport.TransportStatus.setResponse(status); if (message instanceof RemoteTransportException) { - status = TransportStatus.setError(status); + status = transportservice.transport.TransportStatus.setError(status); } if (compress) { - status = TransportStatus.setCompress(status); + status = transportservice.transport.TransportStatus.setCompress(status); } if (isHandshake) { - status = TransportStatus.setHandshake(status); + status = transportservice.transport.TransportStatus.setHandshake(status); } return status; diff --git a/src/main/java/transport/TcpChannel.java b/src/main/java/transportservice/transport/TcpChannel.java similarity index 96% rename from src/main/java/transport/TcpChannel.java rename to src/main/java/transportservice/transport/TcpChannel.java index ef5f914..82b8950 100644 --- a/src/main/java/transport/TcpChannel.java +++ b/src/main/java/transportservice/transport/TcpChannel.java @@ -1,4 +1,4 @@ -package transport;/* +package transportservice.transport;/* * SPDX-License-Identifier: Apache-2.0 * * The OpenSearch Contributors require contributions made 
to @@ -40,7 +40,7 @@ /** * This is a tcp channel representing a single channel connection to another node. It is the base channel - * abstraction used by the {@link TcpTransport} and {@link TransportService}. All tcp transport + * abstraction used by the {@link TcpTransport} and {@link TransportService}. All tcp transportservice.transport * implementations must return channels that adhere to the required method contracts. */ public interface TcpChannel extends CloseableChannel { @@ -105,7 +105,7 @@ public void markAccessed(long relativeMillisTime) { lastAccessedTime = relativeMillisTime; } - long lastAccessedTime() { + public long lastAccessedTime() { return lastAccessedTime; } } diff --git a/src/main/java/transportservice/transport/TcpTransportChannel.java b/src/main/java/transportservice/transport/TcpTransportChannel.java new file mode 100644 index 0000000..43af712 --- /dev/null +++ b/src/main/java/transportservice/transport/TcpTransportChannel.java @@ -0,0 +1,134 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.Version; +import org.opensearch.common.lease.Releasable; +import org.opensearch.search.query.QuerySearchResult; +import org.opensearch.transport.TcpChannel; +import org.opensearch.transport.TransportChannel; +import org.opensearch.transport.TransportResponse; + +import java.io.IOException; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; + +public final class TcpTransportChannel implements TransportChannel { + + private final AtomicBoolean released = new AtomicBoolean(); + private final OutboundHandler outboundHandler; + private final TcpChannel channel; + private final String action; + private final long requestId; + private final Version version; + private final Set features; + private final boolean compressResponse; + private final boolean isHandshake; + private final Releasable breakerRelease; + + TcpTransportChannel( + OutboundHandler outboundHandler, + TcpChannel channel, + String action, + long requestId, + Version version, + Set features, + boolean compressResponse, + boolean isHandshake, + Releasable breakerRelease + ) { + this.version = version; + this.features = features; + this.channel = channel; + this.outboundHandler = outboundHandler; + this.action = action; + this.requestId = requestId; + this.compressResponse = compressResponse; + this.isHandshake = isHandshake; + this.breakerRelease = breakerRelease; + } + + @Override + public String getProfileName() { + return channel.getProfile(); + } + + @Override + public void sendResponse(TransportResponse response) throws IOException { + try { + if (response instanceof QuerySearchResult && ((QuerySearchResult) response).getShardSearchRequest() != null) { + // update outbound network time with current time before sending response over network + ((QuerySearchResult) 
response).getShardSearchRequest().setOutboundNetworkTime(System.currentTimeMillis()); + } + outboundHandler.sendResponse(version, features, null, requestId, action, response, compressResponse, isHandshake); + } finally { + release(false); + } + } + + @Override + public void sendResponse(Exception exception) throws IOException { + try { + outboundHandler.sendErrorResponse(version, features, null, requestId, action, exception); + } finally { + release(true); + } + } + + private Exception releaseBy; + + private void release(boolean isExceptionResponse) { + if (released.compareAndSet(false, true)) { + assert (releaseBy = new Exception()) != null; // easier to debug if it's already closed + breakerRelease.close(); + } else if (isExceptionResponse == false) { + // only fail if we are not sending an error - we might send the error triggered by the previous + // sendResponse call + throw new IllegalStateException("reserved bytes are already released", releaseBy); + } + } + + @Override + public String getChannelType() { + return "transport"; + } + + @Override + public Version getVersion() { + return version; + } + + public TcpChannel getChannel() { + return channel; + } +} diff --git a/src/main/java/transportservice/transport/Transport.java b/src/main/java/transportservice/transport/Transport.java new file mode 100644 index 0000000..cb769ec --- /dev/null +++ b/src/main/java/transportservice/transport/Transport.java @@ -0,0 +1,289 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.collect.MapBuilder; +import org.opensearch.common.component.LifecycleComponent; +import org.opensearch.common.transport.BoundTransportAddress; +import org.opensearch.common.transport.TransportAddress; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.common.util.concurrent.ConcurrentMapLong; +import org.opensearch.transport.*; + +import java.io.Closeable; +import java.io.IOException; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Predicate; + +public interface Transport extends LifecycleComponent { + + /** + * Registers a new request handler + */ + default void registerRequestHandler(RequestHandlerRegistry reg) { + getRequestHandlers().registerHandler(reg); + } + + void setMessageListener(TransportMessageListener listener); + + default void setSlowLogThreshold(TimeValue slowLogThreshold) {} + + default boolean isSecure() { 
+ return false; + } + + /** + * The address the transport is bound on. + */ + BoundTransportAddress boundAddress(); + + /** + * Further profile bound addresses + * @return null iff profiles are unsupported, otherwise a map with name of profile and its bound transport address + */ + Map profileBoundAddresses(); + + /** + * Returns an address from its string representation. + */ + TransportAddress[] addressesFromString(String address) throws UnknownHostException; + + /** + * Returns a list of all local addresses for this transport + */ + List getDefaultSeedAddresses(); + + /** + * Opens a new connection to the given node. When the connection is fully connected, the listener is called. + * The ActionListener will be called on the calling thread or the generic thread pool. + */ + void openConnection(DiscoveryNode node, ConnectionProfile profile, transportservice.action.ActionListener listener); + + TransportStats getStats(); + + ResponseHandlers getResponseHandlers(); + + RequestHandlers getRequestHandlers(); + + /** + * A unidirectional connection to a {@link DiscoveryNode} + */ + interface Connection extends Closeable { + /** + * The node this connection is associated with + */ + DiscoveryNode getNode(); + + /** + * Sends the request to the node this connection is associated with + * @param requestId see {@link ResponseHandlers#add(ResponseContext)} for details + * @param action the action to execute + * @param request the request to send + * @param options request options to apply + * @throws NodeNotConnectedException if the given node is not connected + */ + void sendRequest(long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException, + TransportException; + + /** + * The listener's {@link ActionListener#onResponse(Object)} method will be called when this + * connection is closed. No implementations currently throw an exception during close, so + * {@link ActionListener#onFailure(Exception)} will not be called. 
+ * + * @param listener to be called + */ + void addCloseListener(transportservice.action.ActionListener listener); + + boolean isClosed(); + + /** + * Returns the version of the node this connection was established with. + */ + default Version getVersion() { + return getNode().getVersion(); + } + + /** + * Returns a key that this connection can be cached on. Delegating subclasses must delegate method call to + * the original connection. + */ + default Object getCacheKey() { + return this; + } + + @Override + void close(); + } + + /** + * This class represents a response context that encapsulates the actual response handler, the action and the connection it was + * executed on. + */ + final class ResponseContext { + + private final TransportResponseHandler handler; + + private final Connection connection; + + private final String action; + + ResponseContext(TransportResponseHandler handler, Connection connection, String action) { + this.handler = handler; + this.connection = connection; + this.action = action; + } + + public TransportResponseHandler handler() { + return handler; + } + + public Connection connection() { + return this.connection; + } + + public String action() { + return this.action; + } + } + + /** + * This class is a registry that allows + */ + final class ResponseHandlers { + private final ConcurrentMapLong> handlers = ConcurrentCollections + .newConcurrentMapLongWithAggressiveConcurrency(); + private final AtomicLong requestIdGenerator = new AtomicLong(); + + /** + * Returns true if the give request ID has a context associated with it. + */ + public boolean contains(long requestId) { + return handlers.containsKey(requestId); + } + + /** + * Removes and return the {@link ResponseContext} for the given request ID or returns + * null if no context is associated with this request ID. 
+ */ + public ResponseContext remove(long requestId) { + return handlers.remove(requestId); + } + + /** + * Adds a new response context and associates it with a new request ID. + * @return the new request ID + * @see Connection#sendRequest(long, String, TransportRequest, TransportRequestOptions) + */ + public long add(ResponseContext holder) { + long requestId = newRequestId(); + ResponseContext existing = handlers.put(requestId, holder); + assert existing == null : "request ID already in use: " + requestId; + return requestId; + } + + /** + * Returns a new request ID to use when sending a message via {@link Connection#sendRequest(long, String, + * TransportRequest, TransportRequestOptions)} + */ + long newRequestId() { + return requestIdGenerator.incrementAndGet(); + } + + /** + * Removes and returns all {@link ResponseContext} instances that match the predicate + */ + public List> prune(Predicate> predicate) { + final List> holders = new ArrayList<>(); + for (Map.Entry> entry : handlers.entrySet()) { + ResponseContext holder = entry.getValue(); + if (predicate.test(holder)) { + ResponseContext remove = handlers.remove(entry.getKey()); + if (remove != null) { + holders.add(holder); + } + } + } + return holders; + } + + /** + * called by the {@link Transport} implementation when a response or an exception has been received for a previously + * sent request (before any processing or deserialization was done). Returns the appropriate response handler or null if not + * found. 
+ */ + public TransportResponseHandler onResponseReceived( + final long requestId, + final transportservice.transport.TransportMessageListener listener + ) { + ResponseContext context = handlers.remove(requestId); + listener.onResponseReceived(requestId, context); + if (context == null) { + return null; + } else { + return context.handler(); + } + } + } + + final class RequestHandlers { + + private volatile Map> requestHandlers = Collections.emptyMap(); + + synchronized void registerHandler(RequestHandlerRegistry reg) { + if (requestHandlers.containsKey(reg.getAction())) { + throw new IllegalArgumentException("transport handlers for action " + reg.getAction() + " is already registered"); + } + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + + // TODO: Only visible for testing. Perhaps move StubbableTransport from + // org.opensearch.test.transport to org.opensearch.transport + public synchronized void forceRegister(RequestHandlerRegistry reg) { + requestHandlers = MapBuilder.newMapBuilder(requestHandlers).put(reg.getAction(), reg).immutableMap(); + } + + @SuppressWarnings("unchecked") + public RequestHandlerRegistry getHandler(String action) { + return (RequestHandlerRegistry) requestHandlers.get(action); + } + } +} diff --git a/src/main/java/transportservice/transport/TransportConnectionListener.java b/src/main/java/transportservice/transport/TransportConnectionListener.java new file mode 100644 index 0000000..ae009fa --- /dev/null +++ b/src/main/java/transportservice/transport/TransportConnectionListener.java @@ -0,0 +1,65 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. + */ + +package transportservice.transport; + +import org.opensearch.cluster.node.DiscoveryNode; + +/** + * A listener interface that allows to react on transport events. All methods may be + * executed on network threads. Consumers must fork in the case of long running or blocking + * operations. + */ +public interface TransportConnectionListener { + + /** + * Called once a connection was opened + * @param connection the connection + */ + default void onConnectionOpened(Transport.Connection connection) {} + + /** + * Called once a connection ws closed. + * @param connection the closed connection + */ + default void onConnectionClosed(Transport.Connection connection) {} + + /** + * Called once a node connection is opened and registered. + */ + default void onNodeConnected(DiscoveryNode node, Transport.Connection connection) {} + + /** + * Called once a node connection is closed and unregistered. 
+ */ + default void onNodeDisconnected(DiscoveryNode node, Transport.Connection connection) {} +} diff --git a/src/main/java/transportservice/transport/TransportHandshaker.java b/src/main/java/transportservice/transport/TransportHandshaker.java new file mode 100644 index 0000000..92b15ff --- /dev/null +++ b/src/main/java/transportservice/transport/TransportHandshaker.java @@ -0,0 +1,282 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.transport; + +import org.opensearch.LegacyESVersion; +import org.opensearch.Version; +import org.opensearch.action.ActionListener; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.io.stream.StreamOutput; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.*; + +import java.io.EOFException; +import java.io.IOException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Sends and receives transport-level connection handshakes. This class will send the initial handshake, + * manage state/timeouts while the handshake is in transit, and handle the eventual response. 
+ */ +public final class TransportHandshaker { + + public static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; + private final ConcurrentMap pendingHandshakes = new ConcurrentHashMap<>(); + private final CounterMetric numHandshakes = new CounterMetric(); + + private final Version version; + private final ThreadPool threadPool; + private final HandshakeRequestSender handshakeRequestSender; + + // @todo remove in 3.0.0 + static final Version V_3_0_0 = Version.fromId(3000099 ^ Version.MASK); + + public TransportHandshaker(Version version, ThreadPool threadPool, HandshakeRequestSender handshakeRequestSender) { + this.version = version; + this.threadPool = threadPool; + this.handshakeRequestSender = handshakeRequestSender; + } + + public void sendHandshake(long requestId, DiscoveryNode node, transportservice.transport.TcpChannel channel, TimeValue timeout, ActionListener listener) { + numHandshakes.inc(); + final HandshakeResponseHandler handler = new HandshakeResponseHandler(requestId, version, listener); + pendingHandshakes.put(requestId, handler); + channel.addCloseListener( + ActionListener.wrap(() -> handler.handleLocalException(new TransportException("handshake failed because connection reset"))) + ); + boolean success = false; + try { + // for the request we use the minCompatVersion since we don't know what's the version of the node we talk to + // we also have no payload on the request but the response will contain the actual version of the node we talk + // to as the payload. + Version minCompatVersion = version.minimumCompatibilityVersion(); + if (version.onOrAfter(Version.V_1_0_0) && version.before(Version.V_2_0_0)) { + // the minCompatibleVersion for OpenSearch 1.x is sent as 6.7.99 instead of 6.8.0 + // as this helps in (indirectly) identifying the remote node version during handle HandshakeRequest itself + // and then send appropriate version (7.10.2/ OpenSearch 1.x version) in response. 
+ // The advantage of doing this is early identification of remote node version as otherwise + // if OpenSearch node also sends 6.8.0, there is no way to differentiate ES 7.x version from + // OpenSearch version and OpenSearch node will end up sending BC version to both ES & OpenSearch remote node. + // Sending only BC version to ElasticSearch node provide easy deprecation path for this BC version logic + // in OpenSearch 2.0.0. + minCompatVersion = Version.fromId(6079999); + } else if (version.onOrAfter(Version.V_2_0_0)) { + minCompatVersion = Version.fromId(7099999); + } + handshakeRequestSender.sendRequest(node, channel, requestId, minCompatVersion); + + threadPool.schedule( + () -> handler.handleLocalException(new ConnectTransportException(node, "handshake_timeout[" + timeout + "]")), + timeout, + ThreadPool.Names.GENERIC + ); + success = true; + } catch (Exception e) { + handler.handleLocalException(new ConnectTransportException(node, "failure to send " + HANDSHAKE_ACTION_NAME, e)); + } finally { + if (success == false) { + TransportResponseHandler removed = pendingHandshakes.remove(requestId); + assert removed == null : "Handshake should not be pending if exception was thrown"; + } + } + } + + void handleHandshake(TransportChannel channel, long requestId, StreamInput stream) throws IOException { + // Must read the handshake request to exhaust the stream + HandshakeRequest handshakeRequest = new HandshakeRequest(stream); + final int nextByte = stream.read(); + if (nextByte != -1) { + throw new IllegalStateException( + "Handshake request not fully read for requestId [" + + requestId + + "], action [" + + TransportHandshaker.HANDSHAKE_ACTION_NAME + + "], available [" + + stream.available() + + "]; resetting" + ); + } + // 1. if remote node is 7.x, then StreamInput version would be 6.8.0 + // 2. if remote node is 6.8 then it would be 5.6.0 + // 3. 
if remote node is OpenSearch 1.x then it would be 6.7.99 + if ((this.version.onOrAfter(Version.V_1_0_0) && this.version.before(V_3_0_0)) + && (stream.getVersion().equals(LegacyESVersion.fromId(6080099)) || stream.getVersion().equals(Version.fromId(5060099)))) { + // send 7.10.2 in response to ensure compatibility w/ Legacy 7.10.x nodes for rolling upgrade support + channel.sendResponse(new HandshakeResponse(LegacyESVersion.V_7_10_2)); + } else { + channel.sendResponse(new HandshakeResponse(this.version)); + } + } + + TransportResponseHandler removeHandlerForHandshake(long requestId) { + return pendingHandshakes.remove(requestId); + } + + public int getNumPendingHandshakes() { + return pendingHandshakes.size(); + } + + public long getNumHandshakes() { + return numHandshakes.count(); + } + + private class HandshakeResponseHandler implements TransportResponseHandler { + + private final long requestId; + private final Version currentVersion; + private final ActionListener listener; + private final AtomicBoolean isDone = new AtomicBoolean(false); + + private HandshakeResponseHandler(long requestId, Version currentVersion, ActionListener listener) { + this.requestId = requestId; + this.currentVersion = currentVersion; + this.listener = listener; + } + + @Override + public HandshakeResponse read(StreamInput in) throws IOException { + return new HandshakeResponse(in); + } + + @Override + public void handleResponse(HandshakeResponse response) { + if (isDone.compareAndSet(false, true)) { + Version version = response.responseVersion; + if (currentVersion.isCompatible(version) == false) { + listener.onFailure( + new IllegalStateException( + "Received message from unsupported version: [" + + version + + "] minimal compatible version is: [" + + currentVersion.minimumCompatibilityVersion() + + "]" + ) + ); + } else { + listener.onResponse(version); + } + } + } + + @Override + public void handleException(TransportException e) { + if (isDone.compareAndSet(false, true)) { + 
listener.onFailure(new IllegalStateException("handshake failed", e)); + } + } + + void handleLocalException(TransportException e) { + if (removeHandlerForHandshake(requestId) != null && isDone.compareAndSet(false, true)) { + listener.onFailure(e); + } + } + + @Override + public String executor() { + return ThreadPool.Names.SAME; + } + } + + public static final class HandshakeRequest extends TransportRequest { + + private final Version version; + + public HandshakeRequest(Version version) { + this.version = version; + } + + HandshakeRequest(StreamInput streamInput) throws IOException { + super(streamInput); + BytesReference remainingMessage; + try { + remainingMessage = streamInput.readBytesReference(); + } catch (EOFException e) { + remainingMessage = null; + } + if (remainingMessage == null) { + version = null; + } else { + try (StreamInput messageStreamInput = remainingMessage.streamInput()) { + this.version = Version.readVersion(messageStreamInput); + } + } + } + + @Override + public void writeTo(StreamOutput streamOutput) throws IOException { + super.writeTo(streamOutput); + assert version != null; + try (BytesStreamOutput messageStreamOutput = new BytesStreamOutput(4)) { + Version.writeVersion(version, messageStreamOutput); + BytesReference reference = messageStreamOutput.bytes(); + streamOutput.writeBytesReference(reference); + } + } + } + + static final class HandshakeResponse extends TransportResponse { + + private final Version responseVersion; + + HandshakeResponse(Version responseVersion) { + this.responseVersion = responseVersion; + } + + private HandshakeResponse(StreamInput in) throws IOException { + super(in); + responseVersion = Version.readVersion(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + assert responseVersion != null; + Version.writeVersion(responseVersion, out); + } + + Version getResponseVersion() { + return responseVersion; + } + } + + @FunctionalInterface + public interface HandshakeRequestSender { 
+ + void sendRequest(DiscoveryNode node, transportservice.transport.TcpChannel channel, long requestId, Version version) throws IOException; + } +} diff --git a/src/main/java/transportservice/transport/TransportKeepAlive.java b/src/main/java/transportservice/transport/TransportKeepAlive.java new file mode 100644 index 0000000..f0935e8 --- /dev/null +++ b/src/main/java/transportservice/transport/TransportKeepAlive.java @@ -0,0 +1,218 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.transport; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.opensearch.action.ActionListener; +import org.opensearch.common.AsyncBiFunction; +import org.opensearch.common.bytes.BytesReference; +import org.opensearch.common.component.Lifecycle; +import org.opensearch.common.io.stream.BytesStreamOutput; +import org.opensearch.common.metrics.CounterMetric; +import org.opensearch.common.unit.TimeValue; +import org.opensearch.common.util.concurrent.AbstractLifecycleRunnable; +import org.opensearch.common.util.concurrent.ConcurrentCollections; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.ConnectionProfile; +import org.opensearch.transport.TcpChannel; + +import java.io.Closeable; +import java.io.IOException; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Implements the scheduling and sending of keep alive pings. Client channels send keep alive pings to the + * server and server channels respond. Pings are only sent at the scheduled time if the channel did not send + * and receive a message since the last ping. 
+ */ +public final class TransportKeepAlive implements Closeable { + + public static final int PING_DATA_SIZE = -1; + + private static final BytesReference PING_MESSAGE; + + static { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.writeByte((byte) 'E'); + out.writeByte((byte) 'S'); + out.writeInt(PING_DATA_SIZE); + PING_MESSAGE = out.copyBytes(); + } catch (IOException e) { + throw new AssertionError(e.getMessage(), e); // won't happen + } + } + + private final Logger logger = LogManager.getLogger(TransportKeepAlive.class); + private final CounterMetric successfulPings = new CounterMetric(); + private final CounterMetric failedPings = new CounterMetric(); + private final ConcurrentMap pingIntervals = ConcurrentCollections.newConcurrentMap(); + private final Lifecycle lifecycle = new Lifecycle(); + private final ThreadPool threadPool; + private final AsyncBiFunction pingSender; + + public TransportKeepAlive(ThreadPool threadPool, AsyncBiFunction pingSender) { + this.threadPool = threadPool; + this.pingSender = pingSender; + + this.lifecycle.moveToStarted(); + } + + public void registerNodeConnection(List nodeChannels, ConnectionProfile connectionProfile) { + TimeValue pingInterval = connectionProfile.getPingInterval(); + if (pingInterval.millis() < 0) { + return; + } + + final ScheduledPing scheduledPing = pingIntervals.computeIfAbsent(pingInterval, ScheduledPing::new); + scheduledPing.ensureStarted(); + + for (transportservice.transport.TcpChannel channel : nodeChannels) { + scheduledPing.addChannel(channel); + channel.addCloseListener(ActionListener.wrap(() -> scheduledPing.removeChannel(channel))); + } + } + + /** + * Called when a keep alive ping is received. If the channel that received the keep alive ping is a + * server channel, a ping is sent back. If the channel that received the keep alive is a client channel, + * this method does nothing as the client initiated the ping in the first place. 
+ * + * @param channel that received the keep alive ping + */ + void receiveKeepAlive(transportservice.transport.TcpChannel channel) { + // The client-side initiates pings and the server-side responds. So if this is a client channel, this + // method is a no-op. + if (channel.isServerChannel()) { + sendPing(channel); + } + } + + long successfulPingCount() { + return successfulPings.count(); + } + + long failedPingCount() { + return failedPings.count(); + } + + private void sendPing(transportservice.transport.TcpChannel channel) { + pingSender.apply(channel, PING_MESSAGE, new ActionListener() { + + @Override + public void onResponse(Void v) { + successfulPings.inc(); + } + + @Override + public void onFailure(Exception e) { + if (channel.isOpen()) { + logger.debug(() -> new ParameterizedMessage("[{}] failed to send transport ping", channel), e); + failedPings.inc(); + } else { + logger.trace(() -> new ParameterizedMessage("[{}] failed to send transport ping (channel closed)", channel), e); + } + } + }); + } + + @Override + public void close() { + synchronized (lifecycle) { + lifecycle.moveToStopped(); + lifecycle.moveToClosed(); + } + } + + private class ScheduledPing extends AbstractLifecycleRunnable { + + private final TimeValue pingInterval; + + private final Set channels = ConcurrentCollections.newConcurrentSet(); + + private final AtomicBoolean isStarted = new AtomicBoolean(false); + private volatile long lastPingRelativeMillis; + + private ScheduledPing(TimeValue pingInterval) { + super(lifecycle, logger); + this.pingInterval = pingInterval; + this.lastPingRelativeMillis = threadPool.relativeTimeInMillis(); + } + + void ensureStarted() { + if (isStarted.get() == false && isStarted.compareAndSet(false, true)) { + threadPool.schedule(this, pingInterval, ThreadPool.Names.GENERIC); + } + } + + void addChannel(transportservice.transport.TcpChannel channel) { + channels.add(channel); + } + + void removeChannel(transportservice.transport.TcpChannel channel) { + 
channels.remove(channel); + } + + @Override + protected void doRunInLifecycle() { + for (transportservice.transport.TcpChannel channel : channels) { + // In the future it is possible that we may want to kill a channel if we have not read from + // the channel since the last ping. However, this will need to be backwards compatible with + // pre-6.6 nodes that DO NOT respond to pings + if (needsKeepAlivePing(channel)) { + sendPing(channel); + } + } + this.lastPingRelativeMillis = threadPool.relativeTimeInMillis(); + } + + @Override + protected void onAfterInLifecycle() { + threadPool.scheduleUnlessShuttingDown(pingInterval, ThreadPool.Names.GENERIC, this); + } + + @Override + public void onFailure(Exception e) { + logger.warn("failed to send ping transport message", e); + } + + private boolean needsKeepAlivePing(transportservice.transport.TcpChannel channel) { + transportservice.transport.TcpChannel.ChannelStats stats = channel.getChannelStats(); + long accessedDelta = stats.lastAccessedTime() - lastPingRelativeMillis; + return accessedDelta <= 0; + } + } +} diff --git a/src/main/java/transport/TransportLogger.java b/src/main/java/transportservice/transport/TransportLogger.java similarity index 98% rename from src/main/java/transport/TransportLogger.java rename to src/main/java/transportservice/transport/TransportLogger.java index 930134c..6a3f889 100644 --- a/src/main/java/transport/TransportLogger.java +++ b/src/main/java/transportservice/transport/TransportLogger.java @@ -29,7 +29,7 @@ * GitHub history for details. */ -package transport; +package transportservice.transport; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -75,7 +75,7 @@ static void logOutboundMessage(TcpChannel channel, BytesReference message) { if (logger.isTraceEnabled()) { try { if (message.get(0) != 'E') { - // This is not an OpenSearch transport message. + // This is not an OpenSearch transportservice.transport message. 
return; } BytesReference withoutHeader = message.slice(HEADER_SIZE, message.length() - HEADER_SIZE); diff --git a/src/main/java/transportservice/transport/TransportMessageListener.java b/src/main/java/transportservice/transport/TransportMessageListener.java new file mode 100644 index 0000000..c7f27f2 --- /dev/null +++ b/src/main/java/transportservice/transport/TransportMessageListener.java @@ -0,0 +1,90 @@ +/* + * SPDX-License-Identifier: Apache-2.0 + * + * The OpenSearch Contributors require contributions made to + * this file be licensed under the Apache-2.0 license or a + * compatible open source license. + */ + +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +/* + * Modifications Copyright OpenSearch Contributors. See + * GitHub history for details. 
+ */ + +package transportservice.transport; + +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.transport.TransportRequest; +import org.opensearch.transport.TransportRequestOptions; +import org.opensearch.transport.TransportResponse; + +public interface TransportMessageListener { + + TransportMessageListener NOOP_LISTENER = new TransportMessageListener() { + }; + + /** + * Called once a request is received + * @param requestId the internal request ID + * @param action the request action + * + */ + default void onRequestReceived(long requestId, String action) {} + + /** + * Called for every action response sent after the response has been passed to the underlying network implementation. + * @param requestId the request ID (unique per client) + * @param action the request action + * @param response the response send + */ + default void onResponseSent(long requestId, String action, TransportResponse response) {} + + /*** + * Called for every failed action response after the response has been passed to the underlying network implementation. 
+ * @param requestId the request ID (unique per client) + * @param action the request action + * @param error the error sent back to the caller + */ + default void onResponseSent(long requestId, String action, Exception error) {} + + /** + * Called for every request sent to a server after the request has been passed to the underlying network implementation + * @param node the node the request was sent to + * @param requestId the internal request id + * @param action the action name + * @param request the actual request + * @param finalOptions the request options + */ + default void onRequestSent( + DiscoveryNode node, + long requestId, + String action, + TransportRequest request, + TransportRequestOptions finalOptions + ) {} + + /** + * Called for every response received + * @param requestId the request id for this reponse + * @param context the response context or null if the context was already processed ie. due to a timeout. + */ + default void onResponseReceived(long requestId, Transport.ResponseContext context) {} +} diff --git a/src/main/java/transport/TransportStatus.java b/src/main/java/transportservice/transport/TransportStatus.java similarity index 98% rename from src/main/java/transport/TransportStatus.java rename to src/main/java/transportservice/transport/TransportStatus.java index 9596ef8..53c77b2 100644 --- a/src/main/java/transport/TransportStatus.java +++ b/src/main/java/transportservice/transport/TransportStatus.java @@ -30,7 +30,7 @@ * GitHub history for details. 
*/ -package transport; +package transportservice.transport; public final class TransportStatus { diff --git a/src/main/resources/log4j2.xml b/src/main/resources/log4j2.xml index 765ccff..4c7aba0 100644 --- a/src/main/resources/log4j2.xml +++ b/src/main/resources/log4j2.xml @@ -9,7 +9,7 @@ - + diff --git a/src/test/java/TestConnection.java b/src/test/java/TestConnection.java new file mode 100644 index 0000000..818b988 --- /dev/null +++ b/src/test/java/TestConnection.java @@ -0,0 +1,210 @@ +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; +import org.junit.jupiter.api.Test; +import org.opensearch.Version; +import org.opensearch.cluster.node.DiscoveryNode; +import org.opensearch.common.Nullable; +import org.opensearch.common.io.stream.StreamInput; +import org.opensearch.common.settings.ClusterSettings; +import org.opensearch.common.settings.Setting; +import org.opensearch.common.settings.Settings; +import org.opensearch.core.internal.io.IOUtils; +import org.opensearch.node.Node; +import org.opensearch.test.OpenSearchTestCase; +import org.opensearch.test.transport.MockTransportService; +import org.opensearch.threadpool.TestThreadPool; +import org.opensearch.threadpool.ThreadPool; +import org.opensearch.transport.*; +import transportservice.transport.Transport; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static transportservice.TransportService.NOOP_TRANSPORT_INTERCEPTOR; +import transportservice.TcpTransport; + +import java.io.IOException; +import java.util.Collections; +import java.util.Set; + + +public abstract class TestConnection extends OpenSearchTestCase { + protected volatile MockTransportService serviceA; + private static final Version CURRENT_VERSION = Version.fromString(String.valueOf(Version.CURRENT.major) + ".0.0"); + protected static final Version version0 = CURRENT_VERSION.minimumCompatibilityVersion(); 
+ protected ClusterSettings clusterSettingsA; + protected ThreadPool threadPool; + protected volatile DiscoveryNode nodeA; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + + threadPool = new TestThreadPool(getClass().getName()); + final Settings.Builder connectionSettingsBuilder = Settings.builder() + .put(TransportSettings.CONNECTIONS_PER_NODE_RECOVERY.getKey(), 1) + .put(TransportSettings.CONNECTIONS_PER_NODE_BULK.getKey(), 1) + .put(TransportSettings.CONNECTIONS_PER_NODE_REG.getKey(), 2) + .put(TransportSettings.CONNECTIONS_PER_NODE_STATE.getKey(), 1) + .put(TransportSettings.CONNECTIONS_PER_NODE_PING.getKey(), 1); + + connectionSettingsBuilder.put(TransportSettings.TCP_KEEP_ALIVE.getKey(), randomBoolean()); + if (randomBoolean()) { + connectionSettingsBuilder.put(TransportSettings.TCP_KEEP_IDLE.getKey(), randomIntBetween(1, 300)); + } + if (randomBoolean()) { + connectionSettingsBuilder.put(TransportSettings.TCP_KEEP_INTERVAL.getKey(), randomIntBetween(1, 300)); + } + if (randomBoolean()) { + connectionSettingsBuilder.put(TransportSettings.TCP_KEEP_COUNT.getKey(), randomIntBetween(1, 10)); + } + + final Settings connectionSettings = connectionSettingsBuilder.build(); + serviceA = buildService("TS_A", version0, clusterSettingsA, connectionSettings); // this one supports dynamic tracer updates + nodeA = serviceA.getLocalNode(); + } + + protected Set> getSupportedSettings() { + return ClusterSettings.BUILT_IN_CLUSTER_SETTINGS; + } + + protected abstract Transport build(Settings settings, Version version, ClusterSettings clusterSettings, boolean doHandshake); + + private MockTransportService buildService( + final String name, + final Version version, + @Nullable ClusterSettings clusterSettings, + Settings settings, + boolean acceptRequests, + boolean doHandshake, + TransportInterceptor interceptor + ) { + Settings updatedSettings = Settings.builder() + .put(TransportSettings.PORT.getKey(), getPortRange()) + .put(settings) + 
.put(Node.NODE_NAME_SETTING.getKey(), name) + .build(); + if (clusterSettings == null) { + clusterSettings = new ClusterSettings(updatedSettings, getSupportedSettings()); + } + Transport transport = build(updatedSettings, version, clusterSettings, doHandshake); + MockTransportService service = MockTransportService.createNewService( + updatedSettings, + (org.opensearch.transport.Transport) transport, + version, + threadPool, + clusterSettings, + Collections.emptySet(), + interceptor + ); + service.start(); + if (acceptRequests) { + service.acceptIncomingRequests(); + } + return service; + } + + private MockTransportService buildService( + final String name, + final Version version, + @Nullable ClusterSettings clusterSettings, + Settings settings, + boolean acceptRequests, + boolean doHandshake + ) { + return buildService(name, version, clusterSettings, settings, acceptRequests, doHandshake, (TransportInterceptor) NOOP_TRANSPORT_INTERCEPTOR); + } + + protected MockTransportService buildService(final String name, final Version version, Settings settings) { + return buildService(name, version, null, settings); + } + + protected MockTransportService buildService( + final String name, + final Version version, + ClusterSettings clusterSettings, + Settings settings + ) { + return buildService(name, version, clusterSettings, settings, true, true); + } + + public void assertNoPendingHandshakes(Transport transport) { + if (transport instanceof TcpTransport) { + assertEquals(0, ((TcpTransport) transport).getNumPendingHandshakes()); + } + } + +// @Override +// @After +// public void tearDown() throws Exception { +// super.tearDown(); +// try { +// assertNoPendingHandshakes(serviceA.getOriginalTransport()); +// assertNoPendingHandshakes(serviceB.getOriginalTransport()); +// } finally { +// IOUtils.close(serviceA, serviceB, () -> terminate(threadPool)); +// } +// } + + @Test + public void testVoidMessageCompressed() { + try (MockTransportService serviceC = buildService("TS_C", 
CURRENT_VERSION, Settings.EMPTY)) { + serviceC.start(); + serviceC.acceptIncomingRequests(); + + serviceA.registerRequestHandler( + "internal:sayHello", + ThreadPool.Names.GENERIC, + TransportRequest.Empty::new, + (request, channel, task) -> { + try { + channel.sendResponse(TransportResponse.Empty.INSTANCE); + } catch (IOException e) { + logger.error("Unexpected failure", e); + fail(e.getMessage()); + } + } + ); + + Settings settingsWithCompress = Settings.builder().put(TransportSettings.TRANSPORT_COMPRESS.getKey(), true).build(); + ConnectionProfile connectionProfile = ConnectionProfile.buildDefaultConnectionProfile(settingsWithCompress); + serviceC.connectToNode(serviceA.getLocalDiscoNode(), connectionProfile); + + TransportFuture res = serviceC.submitRequest( + nodeA, + "internal:sayHello", + TransportRequest.Empty.INSTANCE, + TransportRequestOptions.EMPTY, + new TransportResponseHandler() { + @Override + public TransportResponse.Empty read(StreamInput in) { + return TransportResponse.Empty.INSTANCE; + } + + @Override + public String executor() { + return ThreadPool.Names.GENERIC; + } + + @Override + public void handleResponse(TransportResponse.Empty response) {} + + @Override + public void handleException(TransportException exp) { + logger.error("Unexpected failure", exp); + fail("got exception instead of a response: " + exp.getMessage()); + } + } + ); + + try { + TransportResponse.Empty message = res.get(); + MatcherAssert.assertThat(message, notNullValue()); + } catch (Exception e) { + MatcherAssert.assertThat(e.getMessage(), false, equalTo(true)); + } + } + } +} diff --git a/src/test/java/TestTransportRequest.java b/src/test/java/TestTransportRequest.java new file mode 100644 index 0000000..d14ebe3 --- /dev/null +++ b/src/test/java/TestTransportRequest.java @@ -0,0 +1,151 @@ +import org.hamcrest.MatcherAssert; +import org.junit.After; +import org.junit.Before; +import org.junit.jupiter.api.Test; +import org.junit.runner.RunWith; +import 
org.opensearch.Version;
import org.opensearch.cluster.coordination.DeterministicTaskQueue;
import org.opensearch.cluster.coordination.FollowersChecker;
import org.opensearch.cluster.coordination.NodeHealthCheckFailureException;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.settings.Settings;
import org.opensearch.monitor.NodeHealthService;
import org.opensearch.monitor.StatusInfo;
import org.opensearch.test.OpenSearchTestCase;
import org.opensearch.test.transport.MockTransport;
import org.opensearch.transport.TransportRequest;
import org.opensearch.transport.TransportResponse;
import org.opensearch.transport.TransportService;
import com.carrotsearch.randomizedtesting.RandomizedRunner;

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Supplier;

import static java.util.Collections.emptySet;
import static org.hamcrest.Matchers.equalTo;
import static org.opensearch.cluster.coordination.FollowersChecker.*;
import static org.opensearch.monitor.StatusInfo.Status.HEALTHY;
import static org.opensearch.node.Node.NODE_NAME_SETTING;

/**
 * Tests the FollowersChecker transport-request path on a deterministic task
 * queue with a MockTransport, verifying that an unhealthy node is failed.
 */
@RunWith(RandomizedRunner.class)
public class TestTransportRequest extends OpenSearchTestCase {

    @Before
    public void setupNew() throws Exception {
        System.out.println("hello"); // NOTE(review): debug leftover — remove or replace with a logger
    }

    @After
    public void setupNewAfter() throws Exception {
        System.out.println("hello"); // NOTE(review): debug leftover — remove or replace with a logger
    }

    /**
     * Builds a Settings instance with each follower-check knob randomly
     * present or absent, so defaults are exercised too.
     * FIX: consistently use randomBoolean() — the original mixed it with
     * random().nextBoolean() for no reason.
     */
    private static Settings randomSettings() {
        final Settings.Builder settingsBuilder = Settings.builder();
        if (randomBoolean()) {
            settingsBuilder.put(FOLLOWER_CHECK_RETRY_COUNT_SETTING.getKey(), randomIntBetween(1, 10));
        }
        if (randomBoolean()) {
            settingsBuilder.put(FOLLOWER_CHECK_INTERVAL_SETTING.getKey(), randomIntBetween(100, 100000) + "ms");
        }
        if (randomBoolean()) {
            settingsBuilder.put(FOLLOWER_CHECK_TIMEOUT_SETTING.getKey(), randomIntBetween(1, 100000) + "ms");
        }
        return settingsBuilder.build();
    }

    /**
     * FIX: the @Test annotation was commented out (the file's own TODO said
     * "Need to add @Test for all the test"), so this test was never executed.
     * A node whose health check throws must be failed immediately
     * (expectedFailureTime = 0).
     */
    @Test
    public void testFailsNodeThatIsUnhealthy() {
        testTransportRequest(
            randomSettings(),
            () -> { throw new NodeHealthCheckFailureException("non writable exception"); },
            "health check failed",
            0,
            () -> new StatusInfo(HEALTHY, "healthy-info")
        );
    }

    /**
     * Drives a FollowersChecker against a mocked transport until the checked
     * node is reported failed, then asserts the failure reason and the
     * deterministic time at which it occurred.
     *
     * @param testSettings        follower-check settings under test
     * @param responder           supplies the "other node"'s response, or throws
     * @param failureReason       reason expected when the node is failed
     * @param expectedFailureTime deterministic-clock millis at which failure must occur
     * @param nodeHealthService   local health service fed to the checker
     */
    private void testTransportRequest(
        Settings testSettings,
        Supplier<TransportResponse.Empty> responder,
        String failureReason,
        long expectedFailureTime,
        NodeHealthService nodeHealthService
    ) {
        final DiscoveryNode localNode = new DiscoveryNode("local-node", buildNewFakeTransportAddress(), Version.CURRENT);
        final DiscoveryNode otherNode = new DiscoveryNode("other-node", buildNewFakeTransportAddress(), Version.CURRENT);
        final Settings settings = Settings.builder().put(NODE_NAME_SETTING.getKey(), localNode.getName()).put(testSettings).build();
        final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(settings, random());

        final MockTransport mockTransport = new MockTransport() {
            @Override
            protected void onSendRequest(long requestId, String action, TransportRequest request, DiscoveryNode node) {
                assertNotEquals(node, localNode);
                deterministicTaskQueue.scheduleNow(new Runnable() {
                    @Override
                    public void run() {
                        if (node.equals(otherNode) == false) {
                            // other nodes are ok
                            handleResponse(requestId, TransportResponse.Empty.INSTANCE);
                            return;
                        }
                        try {
                            final TransportResponse.Empty response = responder.get();
                            if (response != null) {
                                handleResponse(requestId, response);
                            }
                        } catch (Exception e) {
                            handleRemoteError(requestId, e);
                        }
                    }

                    @Override
                    public String toString() {
                        return "sending response to [" + action + "][" + requestId + "] from " + node;
                    }
                });
            }
        };

        final TransportService transportService = mockTransport.createTransportService(
            settings,
            deterministicTaskQueue.getThreadPool(),
            TransportService.NOOP_TRANSPORT_INTERCEPTOR,
            boundTransportAddress -> localNode,
            null,
            emptySet()
        );

        transportService.start();
        transportService.acceptIncomingRequests();

        final AtomicBoolean nodeFailed = new AtomicBoolean();

        final FollowersChecker followersChecker = new FollowersChecker(
            settings,
            transportService,
            fcr -> { assert false : fcr; },
            (node, reason) -> {
                // The node must be failed exactly once, with the expected reason.
                assertTrue(nodeFailed.compareAndSet(false, true));
                MatcherAssert.assertThat(reason, equalTo(failureReason));
            },
            nodeHealthService
        );

        DiscoveryNodes discoveryNodes = DiscoveryNodes.builder().add(localNode).add(otherNode).localNodeId(localNode.getId()).build();
        followersChecker.setCurrentNodes(discoveryNodes);
        // Pump the deterministic queue (advancing virtual time when idle)
        // until the checker reports the failure.
        while (nodeFailed.get() == false) {
            if (deterministicTaskQueue.hasRunnableTasks() == false) {
                deterministicTaskQueue.advanceTime();
            }
            deterministicTaskQueue.runAllRunnableTasks();
        }
        MatcherAssert.assertThat(deterministicTaskQueue.getCurrentTimeMillis(), equalTo(expectedFailureTime));
    }
}