diff --git a/envoy/1.20/patches/envoy/20240519-wasm-upgrade.patch b/envoy/1.20/patches/envoy/20240519-wasm-upgrade.patch new file mode 100644 index 000000000..e9373dcfa --- /dev/null +++ b/envoy/1.20/patches/envoy/20240519-wasm-upgrade.patch @@ -0,0 +1,5505 @@ +diff -Naur envoy/bazel/dependency_imports.bzl envoy-new/bazel/dependency_imports.bzl +--- envoy/bazel/dependency_imports.bzl 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/dependency_imports.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -1,14 +1,14 @@ + load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies") + load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") +-load("@bazel_toolchains//rules/exec_properties:exec_properties.bzl", "create_rbe_exec_properties_dict", "custom_exec_properties") + load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository") + load("@build_bazel_rules_apple//apple:repositories.bzl", "apple_rules_dependencies") + load("@rules_fuzzing//fuzzing:repositories.bzl", "rules_fuzzing_dependencies") + load("@upb//bazel:workspace_deps.bzl", "upb_deps") +-load("@rules_rust//rust:repositories.bzl", "rust_repositories") ++load("@rules_rust//rust:repositories.bzl", "rules_rust_dependencies", "rust_register_toolchains") + load("@rules_antlr//antlr:deps.bzl", "antlr_dependencies") + load("@proxy_wasm_rust_sdk//bazel:dependencies.bzl", "proxy_wasm_rust_sdk_dependencies") + load("@rules_cc//cc:repositories.bzl", "rules_cc_dependencies", "rules_cc_toolchains") ++load("@emsdk//:emscripten_deps.bzl", "emscripten_deps") + + # go version for rules_go + GO_VERSION = "1.15.5" +@@ -20,7 +20,8 @@ + go_register_toolchains(go_version) + gazelle_dependencies() + apple_rules_dependencies() +- rust_repositories() ++ rules_rust_dependencies() ++ rust_register_toolchains() + upb_deps() + antlr_dependencies(472) + proxy_wasm_rust_sdk_dependencies() +@@ -30,13 +31,7 @@ + ) + rules_cc_dependencies() + rules_cc_toolchains() +- +- custom_exec_properties( +- name = "envoy_large_machine_exec_property", +- constants = { +- "LARGE_MACHINE": create_rbe_exec_properties_dict(labels = dict(size = "large")), +- }, +- ) ++ emscripten_deps() + + # These dependencies, like most of the Go in this repository, exist only for the API. 
+ go_repository( +diff -Naur envoy/bazel/envoy_test.bzl envoy-new/bazel/envoy_test.bzl +--- envoy/bazel/envoy_test.bzl 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/envoy_test.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -148,6 +148,7 @@ + repository = "", + external_deps = [], + deps = [], ++ alimesh_deps = [], + tags = [], + args = [], + copts = [], +@@ -160,6 +161,11 @@ + env = {}): + coverage_tags = tags + ([] if coverage else ["nocoverage"]) + ++ deps = deps + select({ ++ "@envoy//bazel:alimesh": [], ++ "//conditions:default": alimesh_deps, ++ }) ++ + cc_test( + name = name, + srcs = srcs, +@@ -193,6 +199,7 @@ + data = [], + external_deps = [], + deps = [], ++ alimesh_deps = [], + repository = "", + tags = [], + include_prefix = None, +@@ -200,6 +207,12 @@ + alwayslink = 1, + **kargs): + disable_pch = kargs.pop("disable_pch", True) ++ ++ deps = deps + select({ ++ "@envoy//bazel:alimesh": [], ++ "//conditions:default": alimesh_deps, ++ }) ++ + _envoy_cc_test_infrastructure_library( + name, + srcs, +diff -Naur envoy/bazel/external/cargo/BUILD.bazel envoy-new/bazel/external/cargo/BUILD.bazel +--- envoy/bazel/external/cargo/BUILD.bazel 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/external/cargo/BUILD.bazel 2024-05-19 11:59:31.674438554 +0800 +@@ -23,8 +23,18 @@ + + # Export file for Stardoc support + exports_files( +- [ +- "crates.bzl", +- ], ++ glob([ ++ "**/*.bazel", ++ "**/*.bzl", ++ ]), ++ visibility = ["//visibility:public"], ++) ++ ++filegroup( ++ name = "srcs", ++ srcs = glob([ ++ "**/*.bazel", ++ "**/*.bzl", ++ ]), + visibility = ["//visibility:public"], + ) +diff -Naur envoy/bazel/external/cargo/remote/BUILD.bazel envoy-new/bazel/external/cargo/remote/BUILD.bazel +--- envoy/bazel/external/cargo/remote/BUILD.bazel 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/external/cargo/remote/BUILD.bazel 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,17 @@ ++# Export file for Stardoc support ++exports_files( ++ glob([ ++ "**/*.bazel", ++ "**/*.bzl", ++ ]), ++ visibility = ["//visibility:public"], ++) ++ ++filegroup( ++ name = "srcs", ++ srcs = glob([ ++ "**/*.bazel", ++ "**/*.bzl", ++ ]), ++ visibility = ["//visibility:public"], ++) +diff -Naur envoy/bazel/external/cargo/remote/BUILD.protobuf-2.24.1.bazel envoy-new/bazel/external/cargo/remote/BUILD.protobuf-2.24.1.bazel +--- envoy/bazel/external/cargo/remote/BUILD.protobuf-2.24.1.bazel 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/external/cargo/remote/BUILD.protobuf-2.24.1.bazel 2024-05-19 11:59:31.674438554 +0800 +@@ -10,9 +10,10 @@ + + # buildifier: disable=load + load( +- "@rules_rust//rust:rust.bzl", ++ "@rules_rust//rust:defs.bzl", + "rust_binary", + "rust_library", ++ "rust_proc_macro", + "rust_test", + ) + +@@ -51,6 +52,7 @@ + ], + tags = [ + "cargo-raze", ++ "crate-name=protobuf", + "manual", + ], + version = "2.24.1", +@@ -69,7 +71,6 @@ + crate_features = [ + ], + crate_root = "src/lib.rs", +- crate_type = "lib", + data = [], + edition = "2018", + rustc_flags = [ +diff -Naur envoy/bazel/external/wasmtime.BUILD envoy-new/bazel/external/wasmtime.BUILD +--- envoy/bazel/external/wasmtime.BUILD 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/external/wasmtime.BUILD 2024-05-19 11:59:31.674438554 +0800 +@@ -1,5 +1,5 @@ + load("@rules_cc//cc:defs.bzl", "cc_library") +-load("@rules_rust//rust:rust.bzl", "rust_library") ++load("@rules_rust//rust:defs.bzl", "rust_static_library") + + licenses(["notice"]) # Apache 2 + +@@ -13,11 +13,11 @@ + visibility = ["//visibility:private"], + ) + 
+-rust_library( ++# TODO(keith): This should be using rust_library https://github.com/bazelbuild/rules_rust/issues/1238 ++rust_static_library( + name = "rust_c_api", + srcs = glob(["crates/c-api/src/**/*.rs"]), + crate_root = "crates/c-api/src/lib.rs", +- crate_type = "staticlib", + edition = "2018", + proc_macro_deps = [ + "@proxy_wasm_cpp_host//bazel/cargo:wasmtime_c_api_macros", +diff -Naur envoy/bazel/foreign_cc/proxy_wasm_cpp_host.patch envoy-new/bazel/foreign_cc/proxy_wasm_cpp_host.patch +--- envoy/bazel/foreign_cc/proxy_wasm_cpp_host.patch 2024-05-19 11:59:43.170438780 +0800 ++++ envoy-new/bazel/foreign_cc/proxy_wasm_cpp_host.patch 1970-01-01 08:00:00.000000000 +0800 +@@ -1,78 +0,0 @@ +-diff --git a/include/proxy-wasm/exports.h b/include/proxy-wasm/exports.h +-index ded6419..334cee4 100644 +---- a/include/proxy-wasm/exports.h +-+++ b/include/proxy-wasm/exports.h +-@@ -129,6 +129,11 @@ Word call_foreign_function(Word function_name, Word function_name_size, Word arg +- +- // Runtime environment functions exported from envoy to wasm. +- +-+Word wasi_unstable_path_open(Word fd, Word dir_flags, Word path, Word path_len, Word oflags, +-+ int64_t fs_rights_base, int64_t fg_rights_inheriting, Word fd_flags, +-+ Word nwritten_ptr); +-+Word wasi_unstable_fd_prestat_get(Word fd, Word buf_ptr); +-+Word wasi_unstable_fd_prestat_dir_name(Word fd, Word path_ptr, Word path_len); +- Word wasi_unstable_fd_write(Word fd, Word iovs, Word iovs_len, Word nwritten_ptr); +- Word wasi_unstable_fd_read(Word, Word, Word, Word); +- Word wasi_unstable_fd_seek(Word, int64_t, Word, Word); +-@@ -166,7 +171,7 @@ Word pthread_equal(Word left, Word right); +- #define FOR_ALL_WASI_FUNCTIONS(_f) \ +- _f(fd_write) _f(fd_read) _f(fd_seek) _f(fd_close) _f(fd_fdstat_get) _f(environ_get) \ +- _f(environ_sizes_get) _f(args_get) _f(args_sizes_get) _f(clock_time_get) _f(random_get) \ +-- _f(proc_exit) +-+ _f(proc_exit) _f(path_open) _f(fd_prestat_get) _f(fd_prestat_dir_name) +- +- // Helpers to generate a stub to pass to VM, in place of a restricted proxy-wasm capability. +- #define _CREATE_PROXY_WASM_STUB(_fn) \ +-diff --git a/include/proxy-wasm/wasm_vm.h b/include/proxy-wasm/wasm_vm.h +-index c02bb6e..c13c78c 100644 +---- a/include/proxy-wasm/wasm_vm.h +-+++ b/include/proxy-wasm/wasm_vm.h +-@@ -110,6 +110,8 @@ using WasmCallback_WWl = Word (*)(Word, int64_t); +- using WasmCallback_WWlWW = Word (*)(Word, int64_t, Word, Word); +- using WasmCallback_WWm = Word (*)(Word, uint64_t); +- using WasmCallback_WWmW = Word (*)(Word, uint64_t, Word); +-+using WasmCallback_WWWWWWllWW = Word (*)(Word, Word, Word, Word, Word, int64_t, int64_t, Word, +-+ Word); +- using WasmCallback_dd = double (*)(double); +- +- #define FOR_ALL_WASM_VM_IMPORTS(_f) \ +-@@ -127,7 +129,8 @@ using WasmCallback_dd = double (*)(double); +- _f(proxy_wasm::WasmCallback_WWlWW) \ +- _f(proxy_wasm::WasmCallback_WWm) \ +- _f(proxy_wasm::WasmCallback_WWmW) \ +-- _f(proxy_wasm::WasmCallback_dd) +-+ _f(proxy_wasm::WasmCallback_WWWWWWllWW) \ +-+ _f(proxy_wasm::WasmCallback_dd) +- +- enum class Cloneable { +- NotCloneable, // VMs can not be cloned and should be created from scratch. 
+-diff --git a/src/exports.cc b/src/exports.cc +-index 0922b2d..d597914 100644 +---- a/src/exports.cc +-+++ b/src/exports.cc +-@@ -648,6 +648,25 @@ Word grpc_send(Word token, Word message_ptr, Word message_size, Word end_stream) +- return context->grpcSend(token, message.value(), end_stream); +- } +- +-+// __wasi_errno_t path_open(__wasi_fd_t fd, __wasi_lookupflags_t dirflags, const char *path, +-+// size_t path_len, __wasi_oflags_t oflags, __wasi_rights_t fs_rights_base, __wasi_rights_t +-+// fs_rights_inheriting, __wasi_fdflags_t fdflags, __wasi_fd_t *retptr0) +-+Word wasi_unstable_path_open(Word fd, Word dir_flags, Word path, Word path_len, Word oflags, +-+ int64_t fs_rights_base, int64_t fg_rights_inheriting, Word fd_flags, +-+ Word nwritten_ptr) { +-+ return 44; // __WASI_ERRNO_NOENT +-+} +-+ +-+// __wasi_errno_t __wasi_fd_prestat_get(__wasi_fd_t fd, __wasi_prestat_t *retptr0) +-+Word wasi_unstable_fd_prestat_get(Word fd, Word buf_ptr) { +-+ return 8; // __WASI_ERRNO_BADF +-+} +-+ +-+// __wasi_errno_t __wasi_fd_prestat_dir_name(__wasi_fd_t fd, uint8_t * path, __wasi_size_t path_len) +-+Word wasi_unstable_fd_prestat_dir_name(Word fd, Word path_ptr, Word path_len) { +-+ return 52; // __WASI_ERRNO_ENOSYS +-+} +-+ +- // Implementation of writev-like() syscall that redirects stdout/stderr to Envoy +- // logs. +- Word writevImpl(Word fd, Word iovs, Word iovs_len, Word *nwritten_ptr) { +diff -Naur envoy/bazel/repositories.bzl envoy-new/bazel/repositories.bzl +--- envoy/bazel/repositories.bzl 2024-05-19 11:59:43.174438780 +0800 ++++ envoy-new/bazel/repositories.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -212,7 +212,8 @@ + _io_opencensus_cpp() + _com_github_curl() + _com_github_envoyproxy_sqlparser() +- _com_googlesource_chromium_v8() ++ _v8() ++ _com_googlesource_chromium_base_trace_event_common() + _com_github_google_quiche() + _com_googlesource_googleurl() + _com_lightstep_tracer_cpp() +@@ -223,7 +224,7 @@ + _upb() + _proxy_wasm_cpp_sdk() + _proxy_wasm_cpp_host() +- _emscripten_toolchain() ++ _emsdk() + _rules_fuzzing() + external_http_archive("proxy_wasm_rust_sdk") + external_http_archive("com_googlesource_code_re2") +@@ -809,16 +810,28 @@ + actual = "@envoy//bazel/foreign_cc:curl", + ) + +-def _com_googlesource_chromium_v8(): +- external_genrule_repository( +- name = "com_googlesource_chromium_v8", +- genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd", +- build_file = "@envoy//bazel/external:wee8.BUILD", +- patches = ["@envoy//bazel/external:wee8.patch"], ++def _v8(): ++ external_http_archive( ++ name = "v8", ++ patches = [ ++ "@envoy//bazel:v8.patch", ++ "@envoy//bazel:v8_include.patch", ++ ], ++ patch_args = ["-p1"], + ) + native.bind( + name = "wee8", +- actual = "@com_googlesource_chromium_v8//:wee8", ++ actual = "@v8//:wee8", ++ ) ++ ++def _com_googlesource_chromium_base_trace_event_common(): ++ external_http_archive( ++ name = "com_googlesource_chromium_base_trace_event_common", ++ build_file = "@v8//:bazel/BUILD.trace_event_common", ++ ) ++ native.bind( ++ name = "base_trace_event_common", ++ actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common", + ) + + def _com_github_google_quiche(): +@@ -941,23 +954,10 @@ + external_http_archive(name = "proxy_wasm_cpp_sdk") + + def _proxy_wasm_cpp_host(): +- external_http_archive( +- name = "proxy_wasm_cpp_host", +- patches = ["@envoy//bazel/foreign_cc:proxy_wasm_cpp_host.patch"], +- patch_args = ["-p1"], +- ) ++ external_http_archive(name = "proxy_wasm_cpp_host") + +-def _emscripten_toolchain(): +- 
external_http_archive( +- name = "emscripten_toolchain", +- build_file_content = _build_all_content(exclude = [ +- "upstream/emscripten/cache/is_vanilla.txt", +- ".emscripten_sanity", +- ]), +- patch_cmds = [ +- "if [[ \"$(uname -m)\" == \"x86_64\" ]]; then ./emsdk install 2.0.7 && ./emsdk activate --embedded 2.0.7; fi", +- ], +- ) ++def _emsdk(): ++ external_http_archive(name = "emsdk") + + def _com_github_google_jwt_verify(): + external_http_archive("com_github_google_jwt_verify") +diff -Naur envoy/bazel/repositories.bzl.orig envoy-new/bazel/repositories.bzl.orig +--- envoy/bazel/repositories.bzl.orig 2024-05-19 11:59:43.142438779 +0800 ++++ envoy-new/bazel/repositories.bzl.orig 2024-05-19 11:59:31.674438554 +0800 +@@ -212,7 +212,9 @@ + _io_opencensus_cpp() + _com_github_curl() + _com_github_envoyproxy_sqlparser() +- _com_googlesource_chromium_v8() ++ _v8() ++ _com_googlesource_chromium_base_trace_event_common() ++ _com_googlesource_chromium_zlib() + _com_github_google_quiche() + _com_googlesource_googleurl() + _com_lightstep_tracer_cpp() +@@ -223,7 +225,7 @@ + _upb() + _proxy_wasm_cpp_sdk() + _proxy_wasm_cpp_host() +- _emscripten_toolchain() ++ _emsdk() + _rules_fuzzing() + external_http_archive("proxy_wasm_rust_sdk") + external_http_archive("com_googlesource_code_re2") +@@ -272,6 +274,8 @@ + actual = "@bazel_tools//tools/cpp/runfiles", + ) + ++ _com_github_higress_wasm_extensions() ++ + def _boringssl(): + external_http_archive( + name = "boringssl", +@@ -807,16 +811,35 @@ + actual = "@envoy//bazel/foreign_cc:curl", + ) + +-def _com_googlesource_chromium_v8(): +- external_genrule_repository( +- name = "com_googlesource_chromium_v8", +- genrule_cmd_file = "@envoy//bazel/external:wee8.genrule_cmd", +- build_file = "@envoy//bazel/external:wee8.BUILD", +- patches = ["@envoy//bazel/external:wee8.patch"], ++def _v8(): ++ external_http_archive( ++ name = "v8", ++ patches = ["@envoy//bazel:v8.patch"], ++ patch_args = ["-p1"], + ) + native.bind( + name = "wee8", +- actual = "@com_googlesource_chromium_v8//:wee8", ++ actual = "@v8//:wee8", ++ ) ++ ++def _com_googlesource_chromium_base_trace_event_common(): ++ external_http_archive( ++ name = "com_googlesource_chromium_base_trace_event_common", ++ build_file = "@v8//:bazel/BUILD.trace_event_common", ++ ) ++ native.bind( ++ name = "base_trace_event_common", ++ actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common", ++ ) ++ ++def _com_googlesource_chromium_zlib(): ++ external_http_archive( ++ name = "com_googlesource_chromium_zlib", ++ build_file = "@v8//:bazel/BUILD.zlib", ++ ) ++ native.bind( ++ name = "zlib_compression_utils", ++ actual = "@com_googlesource_chromium_zlib//:zlib_compression_utils", + ) + + def _com_github_google_quiche(): +@@ -941,17 +964,8 @@ + def _proxy_wasm_cpp_host(): + external_http_archive(name = "proxy_wasm_cpp_host") + +-def _emscripten_toolchain(): +- external_http_archive( +- name = "emscripten_toolchain", +- build_file_content = _build_all_content(exclude = [ +- "upstream/emscripten/cache/is_vanilla.txt", +- ".emscripten_sanity", +- ]), +- patch_cmds = [ +- "if [[ \"$(uname -m)\" == \"x86_64\" ]]; then ./emsdk install 2.0.7 && ./emsdk activate --embedded 2.0.7; fi", +- ], +- ) ++def _emsdk(): ++ external_http_archive(name = "emsdk") + + def _com_github_google_jwt_verify(): + external_http_archive("com_github_google_jwt_verify") +@@ -1062,6 +1076,17 @@ + actual = "@com_github_wasm_c_api//:wasmtime_lib", + ) + ++def _com_github_higress_wasm_extensions(): ++ native.local_repository( ++ name = 
"com_github_higress_wasm_extensions", ++ path = "../../wasm-cpp", ++ ) ++ ++ native.bind( ++ name = "basic_auth_lib", ++ actual = "@com_github_higress_wasm_extensions//extensions/basic_auth:basic_auth_lib", ++ ) ++ + def _rules_fuzzing(): + external_http_archive( + name = "rules_fuzzing", +diff -Naur envoy/bazel/repositories_extra.bzl envoy-new/bazel/repositories_extra.bzl +--- envoy/bazel/repositories_extra.bzl 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/repositories_extra.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -1,5 +1,6 @@ ++load("@emsdk//:deps.bzl", emsdk_deps = "deps") + load("@rules_python//python:pip.bzl", "pip_install", "pip_parse") +-load("@proxy_wasm_cpp_host//bazel/cargo:crates.bzl", "proxy_wasm_cpp_host_fetch_remote_crates") ++load("@proxy_wasm_cpp_host//bazel/cargo/wasmtime:crates.bzl", "wasmtime_fetch_remote_crates") + load("//bazel/external/cargo:crates.bzl", "raze_fetch_remote_crates") + + # Python dependencies. +@@ -28,5 +29,6 @@ + # Envoy deps that rely on a first stage of dependency loading in envoy_dependencies(). + def envoy_dependencies_extra(): + _python_deps() +- proxy_wasm_cpp_host_fetch_remote_crates() ++ emsdk_deps() + raze_fetch_remote_crates() ++ wasmtime_fetch_remote_crates() +diff -Naur envoy/bazel/repository_locations.bzl envoy-new/bazel/repository_locations.bzl +--- envoy/bazel/repository_locations.bzl 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/repository_locations.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -816,20 +816,35 @@ + release_date = "2021-09-15", + cpe = "cpe:2.3:a:haxx:libcurl:*", + ), +- com_googlesource_chromium_v8 = dict( ++ v8 = dict( + project_name = "V8", + project_desc = "Google’s open source high-performance JavaScript and WebAssembly engine, written in C++", + project_url = "https://v8.dev", +- version = "9.2.230.13", +- # This archive was created using https://storage.googleapis.com/envoyproxy-wee8/wee8-archive.sh +- # and contains complete checkout of V8 with all dependencies necessary to build wee8. +- sha256 = "77b4d6aaabe1dc60bf6bd2523a187d82292c27a2073ec48610dd098e3d4f80ce", +- urls = ["https://storage.googleapis.com/envoyproxy-wee8/wee8-{version}.tar.gz"], ++ # NOTE: Update together with com_googlesource_chromium_base_trace_event_common. ++ version = "10.7.193.13", ++ # Static snapshot created using https://storage.googleapis.com/envoyproxy-wee8/wee8-fetch-deps.sh. ++ sha256 = "2170df76ce5d7ecd7fb8d131370d210152f200273cba126f06d8b88fb53c9fbc", ++ urls = ["https://storage.googleapis.com/envoyproxy-wee8/v8-{version}.tar.gz"], + use_category = ["dataplane_ext"], + extensions = ["envoy.wasm.runtime.v8"], +- release_date = "2021-06-25", ++ release_date = "2022-10-12", + cpe = "cpe:2.3:a:google:v8:*", + ), ++ com_googlesource_chromium_base_trace_event_common = dict( ++ project_name = "Chromium's trace event headers", ++ project_desc = "Chromium's trace event headers", ++ project_url = "https://chromium.googlesource.com/chromium/src/base/trace_event/common/", ++ # NOTE: Update together with v8. ++ # Use version and sha256 from https://storage.googleapis.com/envoyproxy-wee8/v8--deps.sha256. ++ version = "521ac34ebd795939c7e16b37d9d3ddb40e8ed556", ++ # Static snapshot created using https://storage.googleapis.com/envoyproxy-wee8/wee8-fetch-deps.sh. 
++ sha256 = "d99726bd452d1dd6cd50ab33060774e8437d9f0fc6079589f657fe369c66ec09", ++ urls = ["https://storage.googleapis.com/envoyproxy-wee8/chromium-base_trace_event_common-{version}.tar.gz"], ++ use_category = ["dataplane_ext"], ++ extensions = ["envoy.wasm.runtime.v8"], ++ release_date = "2022-10-12", ++ cpe = "N/A", ++ ), + com_github_google_quiche = dict( + project_name = "QUICHE", + project_desc = "QUICHE (QUIC, HTTP/2, Etc) is Google‘s implementation of QUIC and related protocols", +@@ -991,11 +1006,11 @@ + proxy_wasm_cpp_sdk = dict( + project_name = "WebAssembly for Proxies (C++ SDK)", + project_desc = "WebAssembly for Proxies (C++ SDK)", +- project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-sdk", +- version = "fd0be8405db25de0264bdb78fae3a82668c03782", +- sha256 = "c57de2425b5c61d7f630c5061e319b4557ae1f1c7526e5a51c33dc1299471b08", ++ project_url = "https://github.com/higress-group/proxy-wasm-cpp-sdk", ++ version = "47bb9cd141a151415ad6a597ed60c78bea2ce0b7", ++ sha256 = "cab5efa54c0cec8eb17c0a2f6ce72b9cd84ebba2b332e919187f963a5d7cfaa1", + strip_prefix = "proxy-wasm-cpp-sdk-{version}", +- urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"], ++ urls = ["https://github.com/higress-group/proxy-wasm-cpp-sdk/archive/{version}.tar.gz"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", +@@ -1015,11 +1030,11 @@ + proxy_wasm_cpp_host = dict( + project_name = "WebAssembly for Proxies (C++ host implementation)", + project_desc = "WebAssembly for Proxies (C++ host implementation)", +- project_url = "https://github.com/proxy-wasm/proxy-wasm-cpp-host", +- version = "03185974ef574233a5f6383311eb74a380146fe2", +- sha256 = "34948e3ba239cc721af8d0a0a5b678325f363cbd542bddecf2267d24780d5b4d", ++ project_url = "https://github.com/higress-group/proxy-wasm-cpp-host", ++ version = "f8b624dc6c37d4e0a3c1b332652746793e2031ad", ++ sha256 = "ba20328101c91d0ae6383947ced99620cd9b4ea22ab2fda6b26f343b38c3be83", + strip_prefix = "proxy-wasm-cpp-host-{version}", +- urls = ["https://github.com/proxy-wasm/proxy-wasm-cpp-host/archive/{version}.tar.gz"], ++ urls = ["https://github.com/higress-group/proxy-wasm-cpp-host/archive/{version}.tar.gz"], + use_category = ["dataplane_ext"], + extensions = [ + "envoy.access_loggers.wasm", +@@ -1033,43 +1048,43 @@ + "envoy.wasm.runtime.wavm", + "envoy.wasm.runtime.wasmtime", + ], +- release_date = "2021-08-12", ++ release_date = "2024-05-18", + cpe = "N/A", + ), + proxy_wasm_rust_sdk = dict( + project_name = "WebAssembly for Proxies (Rust SDK)", + project_desc = "WebAssembly for Proxies (Rust SDK)", + project_url = "https://github.com/proxy-wasm/proxy-wasm-rust-sdk", +- version = "abd0f5437212e5fd3dd6a70eac3959934278e643", +- sha256 = "a7c7f2fadc151e00694507598880894dfe2d2ea24f858ec9681d38f2abfbe811", ++ version = "0.2.0", ++ sha256 = "010249eac821641b32813670d0ec8a372899a70a7769de5e52e0292d8cd7e0d5", + strip_prefix = "proxy-wasm-rust-sdk-{version}", +- urls = ["https://github.com/proxy-wasm/proxy-wasm-rust-sdk/archive/{version}.tar.gz"], ++ urls = ["https://github.com/proxy-wasm/proxy-wasm-rust-sdk/archive/v{version}.tar.gz"], + use_category = ["test_only"], +- release_date = "2021-07-13", ++ release_date = "2022-04-13", + cpe = "N/A", + ), +- emscripten_toolchain = dict( ++ emsdk = dict( + project_name = "Emscripten SDK", + project_desc = "Emscripten SDK (use by Wasm)", + project_url = "https://github.com/emscripten-core/emsdk", +- version = "2.0.7", +- sha256 = 
"ce7a5c76e8b425aca874cea329fd9ac44b203b777053453b6a37b4496c5ce34f", +- strip_prefix = "emsdk-{version}", ++ # v3.1.7 with Bazel fixes ++ version = "0ea8f8a8707070e9a7c83fbb4a3065683bcf1799", ++ sha256 = "1ca0ff918d476c55707bb99bc0452be28ac5fb8f22a9260a8aae8a38d1bc0e27", ++ strip_prefix = "emsdk-{version}/bazel", + urls = ["https://github.com/emscripten-core/emsdk/archive/{version}.tar.gz"], +- use_category = ["build"], +- release_date = "2020-10-13", ++ use_category = ["test_only"], ++ release_date = "2022-03-09", + ), + rules_rust = dict( + project_name = "Bazel rust rules", + project_desc = "Bazel rust rules (used by Wasm)", + project_url = "https://github.com/bazelbuild/rules_rust", +- version = "7e7246f6c48a5d4e69744cd79b9ccb8886966ee2", +- sha256 = "d54b379559f3fe6ff0cd251be216a5e35acf241451eec8144455482e8f4748f8", +- strip_prefix = "rules_rust-{version}", +- urls = ["https://github.com/bazelbuild/rules_rust/archive/{version}.tar.gz"], ++ version = "0.2.0", ++ sha256 = "39655ab175e3c6b979f362f55f58085528f1647957b0e9b3a07f81d8a9c3ea0a", ++ urls = ["https://github.com/bazelbuild/rules_rust/releases/download/{version}/rules_rust-v{version}.tar.gz"], + use_category = ["dataplane_ext"], + extensions = ["envoy.wasm.runtime.wasmtime"], +- release_date = "2021-06-29", ++ release_date = "2022-03-30", + cpe = "N/A", + ), + rules_antlr = dict( +diff -Naur envoy/bazel/v8_include.patch envoy-new/bazel/v8_include.patch +--- envoy/bazel/v8_include.patch 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/bazel/v8_include.patch 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,41 @@ ++# fix include types for late clang (15.0.7) / gcc (13.2.1) ++# for Arch linux / Fedora, like in ++# In file included from external/v8/src/torque/torque.cc:5: ++# In file included from external/v8/src/torque/source-positions.h:10: ++# In file included from external/v8/src/torque/contextual.h:10: ++# In file included from external/v8/src/base/macros.h:12: ++# external/v8/src/base/logging.h:154:26: error: use of undeclared identifier 'uint16_t' ++ ++diff --git a/src/base/logging.h b/src/base/logging.h ++--- a/src/base/logging.h +++++ b/src/base/logging.h ++@@ -5,6 +5,7 @@ ++ #ifndef V8_BASE_LOGGING_H_ ++ #define V8_BASE_LOGGING_H_ ++ +++#include ++ #include ++ #include ++ #include ++diff --git a/src/base/macros.h b/src/base/macros.h ++--- a/src/base/macros.h +++++ b/src/base/macros.h ++@@ -5,6 +5,7 @@ ++ #ifndef V8_BASE_MACROS_H_ ++ #define V8_BASE_MACROS_H_ ++ +++#include ++ #include ++ #include ++ ++diff --git a/src/inspector/v8-string-conversions.h b/src/inspector/v8-string-conversions.h ++--- a/src/inspector/v8-string-conversions.h +++++ b/src/inspector/v8-string-conversions.h ++@@ -5,6 +5,7 @@ ++ #ifndef V8_INSPECTOR_V8_STRING_CONVERSIONS_H_ ++ #define V8_INSPECTOR_V8_STRING_CONVERSIONS_H_ ++ +++#include ++ #include ++ ++ // Conversion routines between UT8 and UTF16, used by string-16.{h,cc}. You may +diff -Naur envoy/bazel/v8.patch envoy-new/bazel/v8.patch +--- envoy/bazel/v8.patch 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/bazel/v8.patch 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,108 @@ ++# 1. Use already imported python dependencies ++# 2. Disable pointer compression (limits the maximum number of WasmVMs). ++# 3. Add support for --define=no_debug_info=1. ++# 4. Allow compiling v8 on macOS 10.15 to 13.0. TODO(dio): Will remove this patch when https://bugs.chromium.org/p/v8/issues/detail?id=13428 is fixed. ++# 5. Don't expose Wasm C API (only Wasm C++ API). 
++ ++diff --git a/BUILD.bazel b/BUILD.bazel ++index 4e89f90e7e..ced403d5aa 100644 ++--- a/BUILD.bazel +++++ b/BUILD.bazel ++@@ -4,7 +4,7 @@ ++ ++ load("@bazel_skylib//lib:selects.bzl", "selects") ++ load("@rules_python//python:defs.bzl", "py_binary") ++-load("@v8_python_deps//:requirements.bzl", "requirement") +++load("@base_pip3//:requirements.bzl", "requirement") ++ load( ++ "@v8//:bazel/defs.bzl", ++ "v8_binary", ++diff --git a/BUILD.bazel b/BUILD.bazel ++index 4e89f90e7e..3fcb38b3f3 100644 ++--- a/BUILD.bazel +++++ b/BUILD.bazel ++@@ -157,7 +157,7 @@ v8_int( ++ # If no explicit value for v8_enable_pointer_compression, we set it to 'none'. ++ v8_string( ++ name = "v8_enable_pointer_compression", ++- default = "none", +++ default = "False", ++ ) ++ ++ # Default setting for v8_enable_pointer_compression. ++diff --git a/bazel/defs.bzl b/bazel/defs.bzl ++index e957c0fad3..a6de50e6ab 100644 ++--- a/bazel/defs.bzl +++++ b/bazel/defs.bzl ++@@ -116,6 +116,7 @@ def _default_args(): ++ }) + select({ ++ "@v8//bazel/config:is_clang": [ ++ "-Wno-invalid-offsetof", +++ "-Wno-unneeded-internal-declaration", ++ "-std=c++17", ++ ], ++ "@v8//bazel/config:is_gcc": [ ++@@ -131,6 +132,7 @@ def _default_args(): ++ "-Wno-redundant-move", ++ "-Wno-return-type", ++ "-Wno-stringop-overflow", +++ "-Wno-nonnull", ++ # Use GNU dialect, because GCC doesn't allow using ++ # ##__VA_ARGS__ when in standards-conforming mode. ++ "-std=gnu++17", ++@@ -151,6 +153,23 @@ def _default_args(): ++ "-fno-integrated-as", ++ ], ++ "//conditions:default": [], +++ }) + select({ +++ "@envoy//bazel:no_debug_info": [ +++ "-g0", +++ ], +++ "//conditions:default": [], +++ }) + select({ +++ "@v8//bazel/config:is_macos": [ +++ # The clang available on macOS catalina has a warning that isn't clean on v8 code. +++ "-Wno-range-loop-analysis", +++ +++ # To supress warning on deprecated declaration on v8 code. For example: +++ # external/v8/src/base/platform/platform-darwin.cc:56:22: 'getsectdatafromheader_64' +++ # is deprecated: first deprecated in macOS 13.0. +++ # https://bugs.chromium.org/p/v8/issues/detail?id=13428. 
+++ "-Wno-deprecated-declarations", +++ ], +++ "//conditions:default": [], ++ }), ++ includes = ["include"], ++ linkopts = select({ ++diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc ++index ce3f569fd5..dc8a4c4f6a 100644 ++--- a/src/wasm/c-api.cc +++++ b/src/wasm/c-api.cc ++@@ -2238,6 +2238,8 @@ auto Instance::exports() const -> ownvec { ++ ++ } // namespace wasm ++ +++#if 0 +++ ++ // BEGIN FILE wasm-c.cc ++ ++ extern "C" { ++@@ -3257,3 +3259,5 @@ wasm_instance_t* wasm_frame_instance(const wasm_frame_t* frame) { ++ #undef WASM_DEFINE_SHARABLE_REF ++ ++ } // extern "C" +++ +++#endif ++diff --git a/third_party/inspector_protocol/code_generator.py b/third_party/inspector_protocol/code_generator.py ++index c3768b8..d4a1dda 100644 ++--- a/third_party/inspector_protocol/code_generator.py +++++ b/third_party/inspector_protocol/code_generator.py ++@@ -16,6 +16,8 @@ try: ++ except ImportError: ++ import simplejson as json ++ +++sys.path += [os.path.dirname(__file__)] +++ ++ import pdl ++ ++ try: +diff -Naur envoy/bazel/wasm/wasm.bzl envoy-new/bazel/wasm/wasm.bzl +--- envoy/bazel/wasm/wasm.bzl 2024-05-19 11:59:42.986438776 +0800 ++++ envoy-new/bazel/wasm/wasm.bzl 2024-05-19 11:59:31.674438554 +0800 +@@ -1,5 +1,5 @@ +-load("@proxy_wasm_cpp_sdk//bazel/wasm:wasm.bzl", "wasm_cc_binary") +-load("@rules_rust//rust:rust.bzl", "rust_binary") ++load("@proxy_wasm_cpp_sdk//bazel:defs.bzl", "proxy_wasm_cc_binary") ++load("@rules_rust//rust:defs.bzl", "rust_binary") + + def _wasm_rust_transition_impl(settings, attr): + return { +@@ -65,10 +65,15 @@ + attrs = _wasm_attrs(wasi_rust_transition), + ) + +-def envoy_wasm_cc_binary(name, deps = [], tags = [], **kwargs): +- wasm_cc_binary( ++def envoy_wasm_cc_binary(name, additional_linker_inputs = [], linkopts = [], tags = [], **kwargs): ++ proxy_wasm_cc_binary( + name = name, +- deps = deps + ["@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics"], ++ additional_linker_inputs = additional_linker_inputs + [ ++ "@envoy//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_js", ++ ], ++ linkopts = linkopts + [ ++ "--js-library=$(location @envoy//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_js)", ++ ], + tags = tags + ["manual"], + **kwargs + ) +diff -Naur envoy/.bazelrc envoy-new/.bazelrc +--- envoy/.bazelrc 2024-05-19 11:59:42.954438775 +0800 ++++ envoy-new/.bazelrc 2024-05-19 11:59:31.674438554 +0800 +@@ -65,6 +65,7 @@ + build:asan --copt -fno-sanitize=vptr,function + build:asan --linkopt -fno-sanitize=vptr,function + build:asan --copt -DADDRESS_SANITIZER=1 ++build:asan --copt -DUNDEFINED_SANITIZER=1 + build:asan --copt -D__SANITIZE_ADDRESS__ + build:asan --test_env=ASAN_OPTIONS=handle_abort=1:allow_addr2line=true:check_initialization_order=true:strict_init_order=true:detect_odr_violation=1 + build:asan --test_env=UBSAN_OPTIONS=halt_on_error=true:print_stacktrace=1 +@@ -100,6 +101,7 @@ + build:clang-tsan --copt -fsanitize=thread + build:clang-tsan --linkopt -fsanitize=thread + build:clang-tsan --linkopt -fuse-ld=lld ++build:clang-tsan --copt -DTHREAD_SANITIZER=1 + build:clang-tsan --build_tag_filters=-no_san,-no_tsan + build:clang-tsan --test_tag_filters=-no_san,-no_tsan + # Needed due to https://github.com/libevent/libevent/issues/777 +@@ -118,6 +120,7 @@ + build:clang-msan --linkopt -fsanitize=memory + build:clang-msan --linkopt -fuse-ld=lld + build:clang-msan --copt -fsanitize-memory-track-origins=2 ++build:clang-msan --copt -DMEMORY_SANITIZER=1 + build:clang-msan --test_env=MSAN_SYMBOLIZER_PATH + # MSAN needs -O1 to get reasonable performance. 
+ build:clang-msan --copt -O1
+diff -Naur envoy/envoy/redis/async_client.h envoy-new/envoy/redis/async_client.h
+--- envoy/envoy/redis/async_client.h	1970-01-01 08:00:00.000000000 +0800
++++ envoy-new/envoy/redis/async_client.h	2024-05-19 11:59:31.674438554 +0800
+@@ -0,0 +1,68 @@
++#pragma once
++
++#include <chrono>
++#include <memory>
++#include <string>
++#include <string_view>
++
++namespace Envoy {
++
++namespace Event {
++
++class Dispatcher;
++}
++
++namespace Redis {
++
++struct AsyncClientConfig {
++public:
++  AsyncClientConfig(std::string username, std::string password, int op_timeout_milliseconds)
++      : auth_username_(std::move(username)), auth_password_(std::move(password)),
++        op_timeout_(op_timeout_milliseconds), buffer_flush_timeout_(3) {}
++  const std::string auth_username_;
++  const std::string auth_password_;
++
++  const std::chrono::milliseconds op_timeout_;
++  const uint32_t max_buffer_size_before_flush_{1024};
++  const std::chrono::milliseconds buffer_flush_timeout_;
++  const uint32_t max_upstream_unknown_connections_{100};
++  const bool enable_command_stats_{false};
++};
++
++/**
++ * A handle to an outbound request.
++ */
++class PoolRequest {
++public:
++  virtual ~PoolRequest() = default;
++
++  /**
++   * Cancel the request. No further request callbacks will be called.
++   */
++  virtual void cancel() PURE;
++};
++
++class AsyncClient {
++public:
++  class Callbacks {
++  public:
++    virtual ~Callbacks() = default;
++
++    virtual void onSuccess(std::string_view query, std::string&& response) PURE;
++
++    virtual void onFailure(std::string_view query) PURE;
++  };
++
++  virtual ~AsyncClient() = default;
++
++  virtual void initialize(AsyncClientConfig config) PURE;
++
++  virtual PoolRequest* send(std::string&& query, Callbacks& callbacks) PURE;
++
++  virtual Event::Dispatcher& dispatcher() PURE;
++};
++
++using AsyncClientPtr = std::unique_ptr<AsyncClient>;
++
++} // namespace Redis
++} // namespace Envoy
+diff -Naur envoy/envoy/redis/BUILD envoy-new/envoy/redis/BUILD
+--- envoy/envoy/redis/BUILD	1970-01-01 08:00:00.000000000 +0800
++++ envoy-new/envoy/redis/BUILD	2024-05-19 11:59:31.674438554 +0800
+@@ -0,0 +1,14 @@
++load(
++    "//bazel:envoy_build_system.bzl",
++    "envoy_cc_library",
++    "envoy_package",
++)
++
++licenses(["notice"])  # Apache 2
++
++envoy_package()
++
++envoy_cc_library(
++    name = "async_client_interface",
++    hdrs = ["async_client.h"],
++)
+diff -Naur envoy/envoy/upstream/BUILD envoy-new/envoy/upstream/BUILD
+--- envoy/envoy/upstream/BUILD	2024-05-19 11:59:43.014438777 +0800
++++ envoy-new/envoy/upstream/BUILD	2024-05-19 11:59:31.674438554 +0800
+@@ -38,6 +38,9 @@
+         "@envoy_api//envoy/config/cluster/v3:pkg_cc_proto",
+         "@envoy_api//envoy/config/core/v3:pkg_cc_proto",
+     ],
++    alimesh_deps = [
++        "//envoy/redis:async_client_interface",
++    ],
+ )
+ 
+ envoy_cc_library(
+@@ -130,6 +133,9 @@
+         ":upstream_interface",
+         "//envoy/http:async_client_interface",
+     ],
++    alimesh_deps = [
++        "//envoy/redis:async_client_interface",
++    ],
+ )
+ 
+ envoy_cc_library(
+diff -Naur envoy/envoy/upstream/thread_local_cluster.h envoy-new/envoy/upstream/thread_local_cluster.h
+--- envoy/envoy/upstream/thread_local_cluster.h	2024-05-19 11:59:43.014438777 +0800
++++ envoy-new/envoy/upstream/thread_local_cluster.h	2024-05-19 11:59:31.674438554 +0800
+@@ -2,6 +2,7 @@
+ 
+ #include "envoy/common/pure.h"
+ #include "envoy/http/async_client.h"
++#include "envoy/redis/async_client.h"
+ #include "envoy/upstream/load_balancer.h"
+ #include "envoy/upstream/upstream.h"
+ 
+@@ -137,6 +138,9 @@
+    * owns the client.
+    */
+   virtual Http::AsyncClient& httpAsyncClient() PURE;
++#if defined(ALIMESH)
++  virtual Redis::AsyncClient& redisAsyncClient() PURE;
++#endif
+ };
+ 
+ using ThreadLocalClusterOptRef = absl::optional<std::reference_wrapper<ThreadLocalCluster>>;
+diff -Naur envoy/source/common/redis/async_client_impl.cc envoy-new/source/common/redis/async_client_impl.cc
+--- envoy/source/common/redis/async_client_impl.cc	1970-01-01 08:00:00.000000000 +0800
++++ envoy-new/source/common/redis/async_client_impl.cc	2024-05-19 11:59:31.674438554 +0800
+@@ -0,0 +1,293 @@
++#include "source/common/redis/async_client_impl.h"
++
++#include <cstdint>
++#include <memory>
++#include <string>
++#include <vector>
++
++#include "source/common/common/assert.h"
++#include "source/common/common/logger.h"
++#include "source/common/stats/utility.h"
++
++namespace Envoy {
++namespace Redis {
++
++AsyncClientImpl::AsyncClientImpl(
++    Upstream::ThreadLocalCluster* cluster, Event::Dispatcher& dispatcher,
++    RawClientFactory& client_factory, Stats::ScopeSharedPtr&& stats_scope,
++    RedisCommandStatsSharedPtr redis_command_stats,
++    Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager)
++    : cluster_name_(cluster->info()->name()), cluster_(cluster), dispatcher_(dispatcher),
++      drain_timer_(dispatcher.createTimer([this]() -> void { drainClients(); })),
++      client_factory_(client_factory), config_(new ConfigImpl()), stats_scope_(stats_scope),
++      redis_command_stats_(std::move(redis_command_stats)),
++      redis_cluster_stats_{REDIS_CLUSTER_STATS(POOL_COUNTER(*stats_scope_))},
++      refresh_manager_(std::move(refresh_manager)) {
++
++  host_set_member_update_cb_handle_ = cluster_->prioritySet().addMemberUpdateCb(
++      [this](const std::vector<Upstream::HostSharedPtr>& hosts_added,
++             const std::vector<Upstream::HostSharedPtr>& hosts_removed) -> void {
++        onHostsAdded(hosts_added);
++        onHostsRemoved(hosts_removed);
++      });
++
++  for (const auto& i : cluster_->prioritySet().hostSetsPerPriority()) {
++    for (auto& host : i->hosts()) {
++      host_address_map_[host->address()->asString()] = host;
++    }
++  }
++}
++
++AsyncClientImpl::~AsyncClientImpl() {
++  while (!pending_requests_.empty()) {
++    pending_requests_.pop_front();
++  }
++  while (!client_map_.empty()) {
++    client_map_.begin()->second->redis_client_->close();
++  }
++  while (!clients_to_drain_.empty()) {
++    (*clients_to_drain_.begin())->redis_client_->close();
++  }
++}
++
++void AsyncClientImpl::initialize(AsyncClientConfig config) {
++  while (!client_map_.empty()) {
++    client_map_.begin()->second->redis_client_->close();
++  }
++  while (!clients_to_drain_.empty()) {
++    (*clients_to_drain_.begin())->redis_client_->close();
++  }
++
++  config_ = std::make_shared<ConfigImpl>(config);
++  auth_username_ = config.auth_username_;
++  auth_password_ = config.auth_password_;
++}
++
++PoolRequest* AsyncClientImpl::send(std::string&& query, AsyncClient::Callbacks& callbacks) {
++  if (cluster_ == nullptr) {
++    ASSERT(client_map_.empty());
++    ASSERT(host_set_member_update_cb_handle_ == nullptr);
++    return nullptr;
++  }
++
++  Upstream::LoadBalancerContextBase lb_context;
++  Upstream::HostConstSharedPtr host = cluster_->loadBalancer().chooseHost(&lb_context);
++  if (!host) {
++    ENVOY_LOG(debug, "no available host");
++    return nullptr;
++  }
++  pending_requests_.emplace_back(*this, std::move(query), callbacks);
++  PendingRequest& pending_request = pending_requests_.back();
++  ThreadLocalActiveClientPtr& client = this->threadLocalActiveClient(host);
++  pending_request.request_handler_ =
++      client->redis_client_->makeRawRequest(pending_request.incoming_request_, pending_request);
++  if (pending_request.request_handler_) {
++    return 
&pending_request; ++ } else { ++ onRequestCompleted(); ++ return nullptr; ++ } ++} ++ ++PoolRequest* AsyncClientImpl::sendToHost(const std::string& host_address, absl::string_view request, ++ RawClientCallbacks& callbacks) { ++ if (cluster_ == nullptr) { ++ ASSERT(client_map_.empty()); ++ ASSERT(host_set_member_update_cb_handle_ == nullptr); ++ return nullptr; ++ } ++ ++ auto colon_pos = host_address.rfind(':'); ++ if ((colon_pos == std::string::npos) || (colon_pos == (host_address.size() - 1))) { ++ return nullptr; ++ } ++ ++ const std::string ip_address = host_address.substr(0, colon_pos); ++ const bool ipv6 = (ip_address.find(':') != std::string::npos); ++ std::string host_address_map_key; ++ Network::Address::InstanceConstSharedPtr address_ptr; ++ ++ if (!ipv6) { ++ host_address_map_key = host_address; ++ } else { ++ const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1); ++ uint32_t ip_port_number; ++ if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) { ++ return nullptr; ++ } ++ try { ++ address_ptr = std::make_shared(ip_address, ip_port_number); ++ } catch (const EnvoyException&) { ++ return nullptr; ++ } ++ host_address_map_key = address_ptr->asString(); ++ } ++ ++ auto it = host_address_map_.find(host_address_map_key); ++ if (it == host_address_map_.end()) { ++ // This host is not known to the cluster manager. Create a new host and insert it into the map. ++ if (created_via_redirect_hosts_.size() == config_->maxUpstreamUnknownConnections()) { ++ // Too many upstream connections to unknown hosts have been created. ++ redis_cluster_stats_.max_upstream_unknown_connections_reached_.inc(); ++ return nullptr; ++ } ++ if (!ipv6) { ++ // Only create an IPv4 address instance if we need a new Upstream::HostImpl. 
++ const auto ip_port = absl::string_view(host_address).substr(colon_pos + 1); ++ uint32_t ip_port_number; ++ if (!absl::SimpleAtoi(ip_port, &ip_port_number) || (ip_port_number > 65535)) { ++ return nullptr; ++ } ++ try { ++ address_ptr = std::make_shared(ip_address, ip_port_number); ++ } catch (const EnvoyException&) { ++ return nullptr; ++ } ++ } ++ Upstream::HostSharedPtr new_host{new Upstream::HostImpl( ++ cluster_->info(), "", address_ptr, nullptr, 1, envoy::config::core::v3::Locality(), ++ envoy::config::endpoint::v3::Endpoint::HealthCheckConfig::default_instance(), 0, ++ envoy::config::core::v3::UNKNOWN, dispatcher_.timeSource())}; ++ host_address_map_[host_address_map_key] = new_host; ++ created_via_redirect_hosts_.push_back(new_host); ++ it = host_address_map_.find(host_address_map_key); ++ } ++ ++ ThreadLocalActiveClientPtr& client = threadLocalActiveClient(it->second); ++ ++ return client->redis_client_->makeRawRequest(request, callbacks); ++} ++ ++void AsyncClientImpl::ThreadLocalActiveClient::onEvent(Network::ConnectionEvent event) { ++ if (event == Network::ConnectionEvent::RemoteClose || ++ event == Network::ConnectionEvent::LocalClose) { ++ auto client_to_delete = parent_.client_map_.find(host_); ++ if (client_to_delete != parent_.client_map_.end()) { ++ parent_.dispatcher_.deferredDelete(std::move(redis_client_)); ++ parent_.client_map_.erase(client_to_delete); ++ } else { ++ for (auto it = parent_.clients_to_drain_.begin(); it != parent_.clients_to_drain_.end(); ++ it++) { ++ if ((*it).get() == this) { ++ if (!redis_client_->active()) { ++ parent_.redis_cluster_stats_.upstream_cx_drained_.inc(); ++ } ++ parent_.dispatcher_.deferredDelete(std::move(redis_client_)); ++ parent_.clients_to_drain_.erase(it); ++ break; ++ } ++ } ++ } ++ } ++} ++ ++AsyncClientImpl::PendingRequest::PendingRequest(AsyncClientImpl& parent, ++ std::string&& incoming_request, ++ Callbacks& callbacks) ++ : parent_(parent), incoming_request_(incoming_request), callbacks_(callbacks) {} ++ ++AsyncClientImpl::PendingRequest::~PendingRequest() { ++ if (request_handler_) { ++ request_handler_->cancel(); ++ request_handler_ = nullptr; ++ ++ // treat canceled request as failure ++ callbacks_.onFailure(incoming_request_); ++ } ++} ++ ++void AsyncClientImpl::PendingRequest::onResponse(std::string&& response) { ++ request_handler_ = nullptr; ++ callbacks_.onSuccess(incoming_request_, std::move(response)); ++ parent_.onRequestCompleted(); ++} ++ ++void AsyncClientImpl::PendingRequest::onFailure() { ++ request_handler_ = nullptr; ++ callbacks_.onFailure(incoming_request_); ++ // refresh_manager is not constructed ++ // parent.refresh_manager_->onFailure(parent_.cluster_name); ++ parent_.onRequestCompleted(); ++} ++ ++void AsyncClientImpl::PendingRequest::cancel() { ++ request_handler_->cancel(); ++ request_handler_ = nullptr; ++ parent_.onRequestCompleted(); ++} ++ ++void AsyncClientImpl::onHostsAdded(const std::vector& host_added) { ++ for (const auto& host : host_added) { ++ std::string host_address = host->address()->asString(); ++ // Insert new host into address map, possibly overwriting a previous host's entry. ++ host_address_map_[host_address] = host; ++ for (const auto& created_host : created_via_redirect_hosts_) { ++ if (created_host->address()->asString() == host_address) { ++ // Remove our "temporary" host create in sendRequestToHost(). 
++ onHostsRemoved({created_host}); ++ created_via_redirect_hosts_.remove(created_host); ++ break; ++ } ++ } ++ } ++} ++ ++void AsyncClientImpl::onHostsRemoved(const std::vector& host_removed) { ++ for (const auto& host : host_removed) { ++ auto it = client_map_.find(host); ++ if (it != client_map_.end()) { ++ if (it->second->redis_client_->active()) { ++ clients_to_drain_.push_back(std::move(it->second)); ++ client_map_.erase(it); ++ if (!drain_timer_->enabled()) { ++ drain_timer_->enableTimer(std::chrono::seconds(1)); ++ } ++ } else { ++ // There is no pending requests so close the connection ++ it->second->redis_client_->close(); ++ } ++ } ++ // There is the possibility that multiple hosts with the same address ++ // are registered in host_address_map_ given that hosts may be created ++ // upon redirection or supplied as part of the cluster's definition. ++ // only remove cluster defined host here. ++ auto it2 = host_address_map_.find(host->address()->asString()); ++ if (it2 != host_address_map_.end() && (it2->second == host)) { ++ host_address_map_.erase(it2); ++ } ++ } ++} ++ ++void AsyncClientImpl::drainClients() { ++ while (!clients_to_drain_.empty() && !(*clients_to_drain_.begin())->redis_client_->active()) { ++ (*clients_to_drain_.begin())->redis_client_->close(); ++ } ++ if (!clients_to_drain_.empty()) { ++ drain_timer_->enableTimer(std::chrono::seconds(1)); ++ } ++} ++ ++AsyncClientImpl::ThreadLocalActiveClientPtr& ++AsyncClientImpl::threadLocalActiveClient(Upstream::HostConstSharedPtr host) { ++ ThreadLocalActiveClientPtr& client = client_map_[host]; ++ if (!client) { ++ client = std::make_unique(*this); ++ client->host_ = host; ++ client->redis_client_ = ++ client_factory_.create(host, dispatcher_, *config_, redis_command_stats_, *(stats_scope_), ++ auth_username_, auth_password_); ++ client->redis_client_->addConnectionCallbacks(*client); ++ } ++ return client; ++} ++ ++void AsyncClientImpl::onRequestCompleted() { ++ ASSERT(!pending_requests_.empty()); ++ ++ while (!pending_requests_.empty() && !pending_requests_.front().request_handler_) { ++ pending_requests_.pop_front(); ++ } ++} ++ ++} // namespace Redis ++} // namespace Envoy +diff -Naur envoy/source/common/redis/async_client_impl.h envoy-new/source/common/redis/async_client_impl.h +--- envoy/source/common/redis/async_client_impl.h 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/source/common/redis/async_client_impl.h 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,158 @@ ++#pragma once ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "envoy/stats/stats_macros.h" ++#include "envoy/redis/async_client.h" ++ ++#include "source/common/network/address_impl.h" ++#include "source/common/upstream/load_balancer_impl.h" ++#include "source/common/upstream/upstream_impl.h" ++#include "source/extensions/common/redis/cluster_refresh_manager.h" ++#include "source/extensions/filters/network/common/redis/raw_client_impl.h" ++ ++#include "absl/container/node_hash_map.h" ++ ++namespace Envoy { ++namespace Upstream { ++class ThreadLocalCluster; ++} ++namespace Redis { ++ ++#define REDIS_CLUSTER_STATS(COUNTER) \ ++ COUNTER(upstream_cx_drained) \ ++ COUNTER(max_upstream_unknown_connections_reached) ++ ++struct RedisClusterStats { ++ REDIS_CLUSTER_STATS(GENERATE_COUNTER_STRUCT) ++}; ++ ++using Envoy::Extensions::NetworkFilters::Common::Redis::RedisCommandStatsSharedPtr; ++using Envoy::Extensions::NetworkFilters::Common::Redis::Client::Config; ++using 
Envoy::Extensions::NetworkFilters::Common::Redis::Client::ConfigSharedPtr;
++using Envoy::Extensions::NetworkFilters::Common::Redis::Client::ReadPolicy;
++
++using Envoy::Extensions::NetworkFilters::Common::Redis::Client::RawClientCallbacks;
++using Envoy::Extensions::NetworkFilters::Common::Redis::Client::RawClientFactory;
++using Envoy::Extensions::NetworkFilters::Common::Redis::Client::RawClientPtr;
++
++class ConfigImpl : public Config {
++public:
++  ConfigImpl()
++      : op_timeout_(std::chrono::milliseconds(1000)), max_buffer_size_before_flush_(1024),
++        buffer_flush_timeout_(3), max_upstream_unknown_connections_(100),
++        enable_command_stats_(true) {}
++  explicit ConfigImpl(const AsyncClientConfig& config)
++      : op_timeout_(config.op_timeout_),
++        max_buffer_size_before_flush_(config.max_buffer_size_before_flush_),
++        buffer_flush_timeout_(config.buffer_flush_timeout_),
++        max_upstream_unknown_connections_(config.max_upstream_unknown_connections_),
++        enable_command_stats_(config.enable_command_stats_) {}
++
++  std::chrono::milliseconds opTimeout() const override { return op_timeout_; }
++  bool disableOutlierEvents() const override { return false; }
++  bool enableHashtagging() const override { return false; }
++  bool enableRedirection() const override { return false; }
++  uint32_t maxBufferSizeBeforeFlush() const override { return max_buffer_size_before_flush_; }
++  std::chrono::milliseconds bufferFlushTimeoutInMs() const override {
++    return buffer_flush_timeout_;
++  }
++  uint32_t maxUpstreamUnknownConnections() const override {
++    return max_upstream_unknown_connections_;
++  }
++  bool enableCommandStats() const override { return enable_command_stats_; }
++  ReadPolicy readPolicy() const override { return ReadPolicy::Primary; }
++
++  const std::chrono::milliseconds op_timeout_;
++  const uint32_t max_buffer_size_before_flush_;
++  const std::chrono::milliseconds buffer_flush_timeout_;
++  const uint32_t max_upstream_unknown_connections_;
++  const bool enable_command_stats_;
++};
++
++class AsyncClientImpl : public AsyncClient,
++                        public std::enable_shared_from_this<AsyncClientImpl>,
++                        public Logger::Loggable<Logger::Id::redis> {
++public:
++  AsyncClientImpl(Upstream::ThreadLocalCluster* cluster, Event::Dispatcher& dispatcher,
++                  RawClientFactory& client_factory, Stats::ScopeSharedPtr&& stats_scope,
++                  RedisCommandStatsSharedPtr redis_command_stats,
++                  Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager);
++  ~AsyncClientImpl() override;
++
++  // Envoy::Redis::AsyncClient
++  void initialize(AsyncClientConfig config) override;
++  PoolRequest* send(std::string&& query, Callbacks& callbacks) override;
++  PoolRequest* sendToHost(const std::string& host_address, absl::string_view request,
++                          RawClientCallbacks& callbacks);
++  Event::Dispatcher& dispatcher() override { return dispatcher_; }
++
++private:
++  struct ThreadLocalActiveClient : public Network::ConnectionCallbacks {
++    ThreadLocalActiveClient(AsyncClientImpl& parent) : parent_(parent) {}
++
++    // Network::ConnectionCallbacks
++    void onEvent(Network::ConnectionEvent event) override;
++    void onAboveWriteBufferHighWatermark() override {}
++    void onBelowWriteBufferLowWatermark() override {}
++
++    AsyncClientImpl& parent_;
++    Upstream::HostConstSharedPtr host_;
++    RawClientPtr redis_client_;
++  };
++
++  using ThreadLocalActiveClientPtr = std::unique_ptr<ThreadLocalActiveClient>;
++
++  struct PendingRequest : public RawClientCallbacks, public PoolRequest {
++    PendingRequest(AsyncClientImpl& parent, std::string&& incoming_request, Callbacks& callbacks);
++    ~PendingRequest() override;
++
++    // Common::Redis::Client::RawClientCallbacks
++    void onResponse(std::string&& response) override;
++    void onFailure() override;
++
++    // PoolRequest
++    void cancel() override;
++
++    AsyncClientImpl& parent_;
++    std::string incoming_request_;
++    PoolRequest* request_handler_;
++    Callbacks& callbacks_;
++  };
++
++  void onHostsAdded(const std::vector<Upstream::HostSharedPtr>& host_added);
++  void onHostsRemoved(const std::vector<Upstream::HostSharedPtr>& host_removed);
++  void drainClients();
++
++  ThreadLocalActiveClientPtr& threadLocalActiveClient(Upstream::HostConstSharedPtr host);
++
++  void onRequestCompleted();
++
++  const std::string cluster_name_;
++  Upstream::ThreadLocalCluster* cluster_{};
++  Event::Dispatcher& dispatcher_;
++  absl::node_hash_map<Upstream::HostConstSharedPtr, ThreadLocalActiveClientPtr> client_map_;
++  Envoy::Common::CallbackHandlePtr host_set_member_update_cb_handle_;
++  absl::node_hash_map<std::string, Upstream::HostConstSharedPtr> host_address_map_;
++  std::string auth_username_;
++  std::string auth_password_;
++  std::list<Upstream::HostSharedPtr> created_via_redirect_hosts_;
++  std::list<ThreadLocalActiveClientPtr> clients_to_drain_;
++  std::list<PendingRequest> pending_requests_;
++
++  Event::TimerPtr drain_timer_;
++  RawClientFactory& client_factory_;
++  ConfigSharedPtr config_;
++  Stats::ScopeSharedPtr stats_scope_;
++  RedisCommandStatsSharedPtr redis_command_stats_;
++  RedisClusterStats redis_cluster_stats_;
++  const Extensions::Common::Redis::ClusterRefreshManagerSharedPtr refresh_manager_;
++};
++
++} // namespace Redis
++} // namespace Envoy
+diff -Naur envoy/source/common/redis/BUILD envoy-new/source/common/redis/BUILD
+--- envoy/source/common/redis/BUILD	1970-01-01 08:00:00.000000000 +0800
++++ envoy-new/source/common/redis/BUILD	2024-05-19 11:59:31.674438554 +0800
+@@ -0,0 +1,27 @@
++load(
++    "//bazel:envoy_build_system.bzl",
++    "envoy_cc_library",
++    "envoy_package",
++    "envoy_select_enable_http3",
++)
++
++licenses(["notice"])  # Apache 2
++
++envoy_package()
++
++envoy_cc_library(
++    name = "async_client_lib",
++    srcs = ["async_client_impl.cc"],
++    hdrs = ["async_client_impl.h"],
++    deps = [
++        "//envoy/stats:stats_macros",
++        "//envoy/redis:async_client_interface",
++
++        "//source/common/network:address_lib",
++        "//source/common/upstream:upstream_lib",
++        "//source/common/upstream:load_balancer_lib",
++
++        "//source/extensions/common/redis:cluster_refresh_manager_lib",
++        "//source/extensions/filters/network/common/redis:raw_client_lib",
++    ],
++)
+diff -Naur envoy/source/common/upstream/BUILD envoy-new/source/common/upstream/BUILD
+--- envoy/source/common/upstream/BUILD	2024-05-19 11:59:43.030438777 +0800
++++ envoy-new/source/common/upstream/BUILD	2024-05-19 11:59:31.674438554 +0800
+@@ -96,6 +96,9 @@
+         "//source/common/http/http3:conn_pool_lib",
+         "//source/common/http:conn_pool_grid",
+     ]),
++    alimesh_deps = [
++        "//source/common/redis:async_client_lib",
++    ]
+ )
+ 
+ envoy_cc_library(
+diff -Naur envoy/source/common/upstream/cluster_manager_impl.cc envoy-new/source/common/upstream/cluster_manager_impl.cc
+--- envoy/source/common/upstream/cluster_manager_impl.cc	2024-05-19 11:59:43.030438777 +0800
++++ envoy-new/source/common/upstream/cluster_manager_impl.cc	2024-05-19 11:59:31.674438554 +0800
+@@ -49,6 +49,10 @@
+ #include "source/common/http/http3/conn_pool.h"
+ #endif
+ 
++#if defined(ALIMESH)
++#include "source/common/redis/async_client_impl.h"
++#endif
++
+ namespace Envoy {
+ namespace Upstream {
+ namespace {
+@@ -1077,6 +1081,25 @@
+   return http_async_client_;
+ }
+ 
++#if defined(ALIMESH)
++Redis::AsyncClient&
++ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::redisAsyncClient() {
++  using 
Extensions::NetworkFilters::Common::Redis::RedisCommandStats; ++ using Extensions::NetworkFilters::Common::Redis::Client::RawClientFactoryImpl; ++ ++ if (lazy_redis_async_client_ == nullptr) { ++ auto redis_command_stats = ++ RedisCommandStats::createRedisCommandStats(parent_.parent_.stats_.symbolTable()); ++ lazy_redis_async_client_ = std::make_unique( ++ this, parent_.thread_local_dispatcher_, RawClientFactoryImpl::instance_, ++ parent_.parent_.stats_.createScope( ++ fmt::format("cluster.{}.redis_cluster", cluster_info_->name())), ++ redis_command_stats, nullptr); ++ } ++ return *lazy_redis_async_client_; ++} ++#endif ++ + void ClusterManagerImpl::ThreadLocalClusterManagerImpl::ClusterEntry::updateHosts( + const std::string& name, uint32_t priority, + PrioritySet::UpdateHostsParams&& update_hosts_params, +diff -Naur envoy/source/common/upstream/cluster_manager_impl.h envoy-new/source/common/upstream/cluster_manager_impl.h +--- envoy/source/common/upstream/cluster_manager_impl.h 2024-05-19 11:59:43.034438777 +0800 ++++ envoy-new/source/common/upstream/cluster_manager_impl.h 2024-05-19 11:59:31.674438554 +0800 +@@ -421,7 +421,9 @@ + LoadBalancerContext* context) override; + Host::CreateConnectionData tcpConn(LoadBalancerContext* context) override; + Http::AsyncClient& httpAsyncClient() override; +- ++#if defined(ALIMESH) ++ Redis::AsyncClient& redisAsyncClient() override; ++#endif + // Updates the hosts in the priority set. + void updateHosts(const std::string& name, uint32_t priority, + PrioritySet::UpdateHostsParams&& update_hosts_params, +@@ -456,6 +458,9 @@ + LoadBalancerPtr lb_; + ClusterInfoConstSharedPtr cluster_info_; + Http::AsyncClientImpl http_async_client_; ++#if defined(ALIMESH) ++ Redis::AsyncClientPtr lazy_redis_async_client_; ++#endif + }; + + using ClusterEntryPtr = std::unique_ptr; +diff -Naur envoy/source/extensions/common/redis/BUILD envoy-new/source/extensions/common/redis/BUILD +--- envoy/source/extensions/common/redis/BUILD 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/redis/BUILD 2024-05-19 11:59:31.674438554 +0800 +@@ -23,6 +23,11 @@ + name = "cluster_refresh_manager_lib", + srcs = ["cluster_refresh_manager_impl.cc"], + hdrs = ["cluster_refresh_manager_impl.h"], ++ visibility = [ ++ "//:contrib_library", ++ "//:extension_library", ++ "//source/common/redis:__pkg__", ++ ], + deps = [ + ":cluster_refresh_manager_interface", + "//envoy/event:dispatcher_interface", +diff -Naur envoy/source/extensions/common/wasm/context.cc envoy-new/source/extensions/common/wasm/context.cc +--- envoy/source/extensions/common/wasm/context.cc 2024-05-19 11:59:43.150438779 +0800 ++++ envoy-new/source/extensions/common/wasm/context.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -41,6 +41,7 @@ + #include "eval/public/containers/field_backed_list_impl.h" + #include "eval/public/containers/field_backed_map_impl.h" + #include "eval/public/structs/cel_proto_wrapper.h" ++#include "include/proxy-wasm/pairs_util.h" + #include "openssl/bytestring.h" + #include "openssl/hmac.h" + #include "openssl/sha.h" +@@ -58,6 +59,11 @@ + // FilterState prefix for CelState values. 
+ constexpr absl::string_view CelStateKeyPrefix = "wasm."; + ++#if defined(ALIMESH) ++constexpr std::string_view ClearRouteCacheKey = "clear_route_cache"; ++constexpr std::string_view DisableClearRouteCache = "off"; ++#endif ++ + using HashPolicy = envoy::config::route::v3::RouteAction::HashPolicy; + using CelState = Filters::Common::Expr::CelState; + using CelStatePrototype = Filters::Common::Expr::CelStatePrototype; +@@ -378,10 +384,12 @@ + return WasmResult::SerializationFailure; + } + } +- auto size = proxy_wasm::exports::pairsSize(pairs); ++ auto size = proxy_wasm::PairsUtil::pairsSize(pairs); + // prevent string inlining which violates byte alignment + result->resize(std::max(size, static_cast(30))); +- proxy_wasm::exports::marshalPairs(pairs, result->data()); ++ if (!proxy_wasm::PairsUtil::marshalPairs(pairs, result->data(), size)) { ++ return WasmResult::SerializationFailure; ++ } + result->resize(size); + return WasmResult::Ok; + } +@@ -393,13 +401,15 @@ + return WasmResult::SerializationFailure; + } + } +- auto size = proxy_wasm::exports::pairsSize(pairs); ++ auto size = proxy_wasm::PairsUtil::pairsSize(pairs); + // prevent string inlining which violates byte alignment + if (size < 30) { + result->reserve(30); + } + result->resize(size); +- proxy_wasm::exports::marshalPairs(pairs, result->data()); ++ if (!proxy_wasm::PairsUtil::marshalPairs(pairs, result->data(), size)) { ++ return WasmResult::SerializationFailure; ++ } + return WasmResult::Ok; + } + default: +@@ -553,6 +563,9 @@ + break; + case PropertyToken::ROUTE_NAME: + #if defined(ALIMESH) ++ if (info && !info->getRouteName().empty()) { ++ return CelValue::CreateString(&info->getRouteName()); ++ } + if (filter_callbacks) { + auto route = filter_callbacks->route(); + if (route) { +@@ -566,10 +579,11 @@ + } + } + } +-#endif ++#else + if (info) { + return CelValue::CreateString(&info->getRouteName()); + } ++#endif + break; + case PropertyToken::ROUTE_METADATA: + if (info && info->route()) { +@@ -586,9 +600,12 @@ + case PropertyToken::PLUGIN_VM_ID: + return CelValue::CreateStringView(toAbslStringView(wasm()->vm_id())); + case PropertyToken::FILTER_STATE: +- return Protobuf::Arena::Create(arena, +- info->filterState()) +- ->Produce(arena); ++ if (info) { ++ return Protobuf::Arena::Create(arena, ++ info->filterState()) ++ ->Produce(arena); ++ } ++ break; + } + return {}; + } +@@ -747,7 +764,8 @@ + } + const Http::LowerCaseString lower_key{std::string(key)}; + map->addCopy(lower_key, std::string(value)); +- if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { ++ if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_ && ++ !disable_clear_route_cache_) { + decoder_callbacks_->clearRouteCache(); + } + return WasmResult::Ok; +@@ -822,7 +840,8 @@ + const Http::LowerCaseString lower_key{std::string(p.first)}; + map->addCopy(lower_key, std::string(p.second)); + } +- if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { ++ if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_ && ++ !disable_clear_route_cache_) { + decoder_callbacks_->clearRouteCache(); + } + return WasmResult::Ok; +@@ -835,7 +854,8 @@ + } + const Http::LowerCaseString lower_key{std::string(key)}; + map->remove(lower_key); +- if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { ++ if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_ && ++ !disable_clear_route_cache_) { + decoder_callbacks_->clearRouteCache(); + } + return WasmResult::Ok; +@@ -849,7 +869,8 @@ + } + const 
Http::LowerCaseString lower_key{std::string(key)}; + map->setCopy(lower_key, toAbslStringView(value)); +- if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_) { ++ if (type == WasmHeaderMapType::RequestHeaders && decoder_callbacks_ && ++ !disable_clear_route_cache_) { + decoder_callbacks_->clearRouteCache(); + } + return WasmResult::Ok; +@@ -910,6 +931,10 @@ + std::string_view(static_cast(body.linearize(body.length())), body.length())); + } + return nullptr; ++#if defined(ALIMESH) ++ case WasmBufferType::RedisCallResponse: ++ return buffer_.set(rootContext()->redis_call_response_); ++#endif + case WasmBufferType::GrpcReceiveBuffer: + return buffer_.set(rootContext()->grpc_receive_buffer_.get()); + default: +@@ -989,6 +1014,95 @@ + return WasmResult::Ok; + } + ++#if defined(ALIMESH) ++WasmResult Context::redisInit(std::string_view cluster, std::string_view username, ++ std::string_view password, int timeout_milliseconds) { ++ auto cluster_string = std::string(cluster); ++ const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_string); ++ if (thread_local_cluster == nullptr) { ++ return WasmResult::BadArgument; ++ } ++ ++ Redis::AsyncClientConfig config(std::string(username), std::string(password), ++ timeout_milliseconds); ++ thread_local_cluster->redisAsyncClient().initialize(config); ++ ++ return WasmResult::Ok; ++} ++ ++WasmResult Context::redisCall(std::string_view cluster, std::string_view query, ++ uint32_t* token_ptr) { ++ auto cluster_string = std::string(cluster); ++ const auto thread_local_cluster = clusterManager().getThreadLocalCluster(cluster_string); ++ if (thread_local_cluster == nullptr) { ++ return WasmResult::BadArgument; ++ } ++ ++ uint32_t token = wasm()->nextRedisCallId(); ++ auto& handler = redis_request_[token]; ++ handler.context_ = this; ++ handler.token_ = token; ++ ++ auto redis_request = thread_local_cluster->redisAsyncClient().send(std::string(query), handler); ++ if (!redis_request) { ++ redis_request_.erase(token); ++ return WasmResult::InternalFailure; ++ } ++ handler.request_ = redis_request; ++ *token_ptr = token; ++ return WasmResult::Ok; ++} ++ ++void Context::onRedisCallSuccess(uint32_t token, std::string&& response) { ++ if (proxy_wasm::current_context_ != nullptr) { ++ // We are in a reentrant call, so defer. ++ wasm()->addAfterVmCallAction([this, token, response = std::move(response)]() mutable { ++ onRedisCallSuccess(token, std::move(response)); ++ }); ++ return; ++ } ++ ++ auto handler = redis_request_.find(token); ++ if (handler == redis_request_.end()) { ++ return; ++ } ++ ++ uint32_t body_size = response.size(); ++ redis_call_response_ = std::move(response); ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, handler] { ++ redis_call_response_.clear(); ++ redis_request_.erase(handler); ++ }); ++ proxy_wasm::ContextBase::onRedisCallResponse( ++ token, static_cast(proxy_wasm::RedisStatus::Ok), body_size); ++} ++ ++void Context::onRedisCallFailure(uint32_t token) { ++ if (proxy_wasm::current_context_ != nullptr) { ++ // We are in a reentrant call, so defer. 
++ wasm()->addAfterVmCallAction([this, token] { onRedisCallFailure(token); }); ++ return; ++ } ++ ++ auto handler = redis_request_.find(token); ++ if (handler == redis_request_.end()) { ++ return; ++ } ++ status_code_ = static_cast(WasmResult::BrokenConnection); ++ status_message_ = "reset"; ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, handler] { ++ status_message_ = ""; ++ redis_request_.erase(handler); ++ }); ++ proxy_wasm::ContextBase::onRedisCallResponse( ++ token, static_cast(proxy_wasm::RedisStatus::NetworkError), 0); ++} ++#endif ++ + WasmResult Context::grpcCall(std::string_view grpc_service, std::string_view service_name, + std::string_view method_name, const Pairs& initial_metadata, + std::string_view request, std::chrono::milliseconds timeout, +@@ -1159,6 +1273,15 @@ + StreamInfo::FilterState::StateType::Mutable, + prototype.life_span_); + } ++#if defined(ALIMESH) ++ if (path == ClearRouteCacheKey) { ++ if (value == DisableClearRouteCache) { ++ disable_clear_route_cache_ = true; ++ } else { ++ disable_clear_route_cache_ = false; ++ } ++ } ++#endif + if (!state->setValue(toAbslStringView(value))) { + return WasmResult::BadArgument; + } +@@ -1371,6 +1494,11 @@ + for (auto& p : grpc_stream_) { + p.second.stream_->resetStream(); + } ++#if defined(ALIMESH) ++ for (auto& p : redis_request_) { ++ p.second.request_->cancel(); ++ } ++#endif + } + + Network::FilterStatus convertNetworkFilterStatus(proxy_wasm::FilterStatus status) { +@@ -1823,10 +1951,14 @@ + } + http_call_response_ = &response; + uint32_t body_size = response->body().length(); +- onHttpCallResponse(token, response->headers().size(), body_size, +- headerSize(response->trailers())); +- http_call_response_ = nullptr; +- http_request_.erase(handler); ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, handler] { ++ http_call_response_ = nullptr; ++ http_request_.erase(handler); ++ }); ++ ContextBase::onHttpCallResponse(token, response->headers().size(), body_size, ++ headerSize(response->trailers())); + } + + void Context::onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason) { +@@ -1843,21 +1975,34 @@ + // This is the only value currently. + ASSERT(reason == Http::AsyncClient::FailureReason::Reset); + status_message_ = "reset"; +- onHttpCallResponse(token, 0, 0, 0); +- status_message_ = ""; +- http_request_.erase(handler); ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, handler] { ++ status_message_ = ""; ++ http_request_.erase(handler); ++ }); ++ ContextBase::onHttpCallResponse(token, 0, 0, 0); + } + + void Context::onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response) { + ASSERT(proxy_wasm::current_context_ == nullptr); // Non-reentrant. 
++ auto cleanup = [this, token] { ++ if (wasm()->isGrpcCallId(token)) { ++ grpc_call_request_.erase(token); ++ } ++ }; + if (wasm()->on_grpc_receive_) { + grpc_receive_buffer_ = std::move(response); + uint32_t response_size = grpc_receive_buffer_->length(); ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, cleanup] { ++ grpc_receive_buffer_.reset(); ++ cleanup(); ++ }); + ContextBase::onGrpcReceive(token, response_size); +- grpc_receive_buffer_.reset(); +- } +- if (wasm()->isGrpcCallId(token)) { +- grpc_call_request_.erase(token); ++ } else { ++ cleanup(); + } + } + +@@ -1870,21 +2015,30 @@ + }); + return; + } ++ auto cleanup = [this, token] { ++ if (wasm()->isGrpcCallId(token)) { ++ grpc_call_request_.erase(token); ++ } else if (wasm()->isGrpcStreamId(token)) { ++ auto it = grpc_stream_.find(token); ++ if (it != grpc_stream_.end()) { ++ if (it->second.local_closed_) { ++ grpc_stream_.erase(token); ++ } ++ } ++ } ++ }; + if (wasm()->on_grpc_close_) { + status_code_ = static_cast(status); + status_message_ = toAbslStringView(message); +- onGrpcClose(token, status_code_); +- status_message_ = ""; +- } +- if (wasm()->isGrpcCallId(token)) { +- grpc_call_request_.erase(token); +- } else if (wasm()->isGrpcStreamId(token)) { +- auto it = grpc_stream_.find(token); +- if (it != grpc_stream_.end()) { +- if (it->second.local_closed_) { +- grpc_stream_.erase(token); +- } +- } ++ // Deferred "after VM call" actions are going to be executed upon returning from ++ // ContextBase::*, which might include deleting Context object via proxy_done(). ++ wasm()->addAfterVmCallAction([this, cleanup] { ++ status_message_ = ""; ++ cleanup(); ++ }); ++ ContextBase::onGrpcClose(token, status_code_); ++ } else { ++ cleanup(); + } + } + +diff -Naur envoy/source/extensions/common/wasm/context.h envoy-new/source/extensions/common/wasm/context.h +--- envoy/source/extensions/common/wasm/context.h 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/context.h 2024-05-19 11:59:31.674438554 +0800 +@@ -217,7 +217,11 @@ + Pairs additional_headers, uint32_t grpc_status, + std::string_view details) override; + void clearRouteCache() override { ++#if defined(ALIMESH) ++ if (decoder_callbacks_ && !disable_clear_route_cache_) { ++#else + if (decoder_callbacks_) { ++#endif + decoder_callbacks_->clearRouteCache(); + } + } +@@ -246,6 +250,14 @@ + std::string_view request_body, const Pairs& request_trailers, + int timeout_millisconds, uint32_t* token_ptr) override; + ++#if defined(ALIMESH) ++ // Redis ++ WasmResult redisInit(std::string_view cluster, std::string_view username, ++ std::string_view password, int timeout_milliseconds) override; ++ WasmResult redisCall(std::string_view cluster, std::string_view query, ++ uint32_t* token_ptr) override; ++#endif ++ + // Stats/Metrics + WasmResult defineMetric(uint32_t type, std::string_view name, uint32_t* metric_id_ptr) override; + WasmResult incrementMetric(uint32_t metric_id, int64_t offset) override; +@@ -329,6 +341,21 @@ + Http::AsyncClient::Request* request_; + }; + ++#if defined(ALIMESH) ++ struct RedisAsyncClientHandler : public Redis::AsyncClient::Callbacks { ++ // Redis::AsyncClient::Callbacks ++ void onSuccess(std::string_view, std::string&& response) override { ++ context_->onRedisCallSuccess(token_, std::move(response)); ++ } ++ ++ void onFailure(std::string_view) override { 
context_->onRedisCallFailure(token_); } ++ ++ Context* context_; ++ uint32_t token_; ++ Redis::PoolRequest* request_; ++ }; ++#endif ++ + struct GrpcCallClientHandler : public Grpc::RawAsyncRequestCallbacks { + // Grpc::AsyncRequestCallbacks + void onCreateInitialMetadata(Http::RequestHeaderMap& initial_metadata) override { +@@ -379,6 +406,11 @@ + void onHttpCallSuccess(uint32_t token, Envoy::Http::ResponseMessagePtr&& response); + void onHttpCallFailure(uint32_t token, Http::AsyncClient::FailureReason reason); + ++#if defined(ALIMESH) ++ void onRedisCallSuccess(uint32_t token, std::string&& response); ++ void onRedisCallFailure(uint32_t token); ++#endif ++ + void onGrpcCreateInitialMetadata(uint32_t token, Http::RequestHeaderMap& metadata); + void onGrpcReceiveInitialMetadataWrapper(uint32_t token, Http::HeaderMapPtr&& metadata); + void onGrpcReceiveWrapper(uint32_t token, ::Envoy::Buffer::InstancePtr response); +@@ -424,6 +456,11 @@ + // Only available during onHttpCallResponse. + Envoy::Http::ResponseMessagePtr* http_call_response_{}; + ++#if defined(ALIMESH) ++ // Only available during onRedisCallResponse. ++ std::string redis_call_response_{}; ++#endif ++ + Http::HeaderMapPtr grpc_receive_initial_metadata_{}; + Http::HeaderMapPtr grpc_receive_trailing_metadata_{}; + +@@ -450,6 +487,9 @@ + + // MB: must be a node-type map as we take persistent references to the entries. + std::map http_request_; ++#if defined(ALIMESH) ++ std::map redis_request_; ++#endif + std::map grpc_call_request_; + std::map grpc_stream_; + +@@ -464,6 +504,9 @@ + // Filter state prototype declaration. + absl::flat_hash_map + state_prototypes_; ++#if defined(ALIMESH) ++ bool disable_clear_route_cache_ = false; ++#endif + }; + using ContextSharedPtr = std::shared_ptr; + +diff -Naur envoy/source/extensions/common/wasm/ext/BUILD envoy-new/source/extensions/common/wasm/ext/BUILD +--- envoy/source/extensions/common/wasm/ext/BUILD 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/ext/BUILD 2024-05-19 11:59:31.674438554 +0800 +@@ -52,6 +52,12 @@ + alwayslink = 1, + ) + ++filegroup( ++ name = "envoy_proxy_wasm_api_js", ++ srcs = ["envoy_proxy_wasm_api.js"], ++ visibility = ["//visibility:public"], ++) ++ + # NB: this target is compiled both to native code and to Wasm. Hence the generic rule. 
+ proto_library( + name = "declare_property_proto", +diff -Naur envoy/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.js envoy-new/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.js +--- envoy/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.js 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/source/extensions/common/wasm/ext/envoy_proxy_wasm_api.js 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,3 @@ ++mergeInto(LibraryManager.library, { ++ envoy_resolve_dns: function() {}, ++}); +diff -Naur envoy/source/extensions/common/wasm/stats_handler.cc envoy-new/source/extensions/common/wasm/stats_handler.cc +--- envoy/source/extensions/common/wasm/stats_handler.cc 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/stats_handler.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -70,11 +70,31 @@ + switch (event) { + case WasmEvent::VmShutDown: + lifecycle_stats_.active_.set(--active_wasms); ++#ifdef ALIMESH ++ if (is_crashed_) { ++ is_crashed_ = false; ++ if (lifecycle_stats_.crash_.value() > 0) { ++ lifecycle_stats_.crash_.dec(); ++ } ++ } ++#endif + break; + case WasmEvent::VmCreated: + lifecycle_stats_.active_.set(++active_wasms); + lifecycle_stats_.created_.inc(); + break; ++#ifdef ALIMESH ++ case WasmEvent::RuntimeError: ++ if (!is_crashed_) { ++ is_crashed_ = true; ++ lifecycle_stats_.crash_.inc(); ++ lifecycle_stats_.crash_total_.inc(); ++ } ++ break; ++ case WasmEvent::RecoverError: ++ lifecycle_stats_.recover_error_.inc(); ++ break; ++#endif + default: + break; + } +diff -Naur envoy/source/extensions/common/wasm/stats_handler.h envoy-new/source/extensions/common/wasm/stats_handler.h +--- envoy/source/extensions/common/wasm/stats_handler.h 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/stats_handler.h 2024-05-19 11:59:31.674438554 +0800 +@@ -31,12 +31,27 @@ + CREATE_WASM_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) + }; + ++#ifdef ALIMESH ++#define LIFECYCLE_STATS(COUNTER, GAUGE, PLUGIN_COUNTER, PLUGIN_GAUGE) \ ++ COUNTER(created) \ ++ GAUGE(active, NeverImport) \ ++ PLUGIN_COUNTER(recover_total) \ ++ PLUGIN_COUNTER(crash_total) \ ++ PLUGIN_COUNTER(recover_error) \ ++ PLUGIN_GAUGE(crash, NeverImport) ++#else + #define LIFECYCLE_STATS(COUNTER, GAUGE) \ + COUNTER(created) \ + GAUGE(active, NeverImport) ++#endif + + struct LifecycleStats { ++#ifdef ALIMESH ++ LIFECYCLE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT, GENERATE_COUNTER_STRUCT, ++ GENERATE_GAUGE_STRUCT) ++#else + LIFECYCLE_STATS(GENERATE_COUNTER_STRUCT, GENERATE_GAUGE_STRUCT) ++#endif + }; + + using ScopeWeakPtr = std::weak_ptr; +@@ -57,6 +72,9 @@ + RuntimeError, + VmCreated, + VmShutDown, ++#ifdef ALIMESH ++ RecoverError, ++#endif + }; + + class CreateStatsHandler : Logger::Loggable { +@@ -89,17 +107,36 @@ + + class LifecycleStatsHandler { + public: ++#ifdef ALIMESH ++ LifecycleStatsHandler(const Stats::ScopeSharedPtr& scope, std::string runtime, ++ std::string plugin_name) ++ : lifecycle_stats_(LifecycleStats{LIFECYCLE_STATS( ++ POOL_COUNTER_PREFIX(*scope, absl::StrCat("wasm.", runtime, ".")), ++ POOL_GAUGE_PREFIX(*scope, absl::StrCat("wasm.", runtime, ".")), ++ POOL_COUNTER_PREFIX(*scope, ++ absl::StrCat("wasm.", runtime, ".plugin.", plugin_name, ".")), ++ POOL_GAUGE_PREFIX(*scope, ++ absl::StrCat("wasm.", runtime, ".plugin.", plugin_name, ".")))}){}; ++#else + LifecycleStatsHandler(const Stats::ScopeSharedPtr& scope, std::string runtime) + : lifecycle_stats_(LifecycleStats{ + LIFECYCLE_STATS(POOL_COUNTER_PREFIX(*scope, 
absl::StrCat("wasm.", runtime, ".")), + POOL_GAUGE_PREFIX(*scope, absl::StrCat("wasm.", runtime, ".")))}){}; ++#endif + ~LifecycleStatsHandler() = default; + + void onEvent(WasmEvent event); + static int64_t getActiveVmCount(); + ++#ifdef ALIMESH ++ LifecycleStats& stats() { return lifecycle_stats_; } ++#endif ++ + protected: + LifecycleStats lifecycle_stats_; ++#ifdef ALIMESH ++ bool is_crashed_ = false; ++#endif + }; + + } // namespace Wasm +diff -Naur envoy/source/extensions/common/wasm/wasm.cc envoy-new/source/extensions/common/wasm/wasm.cc +--- envoy/source/extensions/common/wasm/wasm.cc 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/wasm.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -59,6 +59,34 @@ + return static_cast(base_wasm_handle->wasm().get()); + } + ++#ifdef ALIMESH ++WasmEvent failStateToWasmEvent(FailState state) { ++ switch (state) { ++ case FailState::Ok: ++ return WasmEvent::Ok; ++ case FailState::UnableToCreateVm: ++ return WasmEvent::UnableToCreateVm; ++ case FailState::UnableToCloneVm: ++ return WasmEvent::UnableToCloneVm; ++ case FailState::MissingFunction: ++ return WasmEvent::MissingFunction; ++ case FailState::UnableToInitializeCode: ++ return WasmEvent::UnableToInitializeCode; ++ case FailState::StartFailed: ++ return WasmEvent::StartFailed; ++ case FailState::ConfigureFailed: ++ return WasmEvent::ConfigureFailed; ++ case FailState::RuntimeError: ++ return WasmEvent::RuntimeError; ++ case FailState::RecoverError: ++ return WasmEvent::RecoverError; ++ } ++ PANIC("corrupt enum"); ++} ++ ++const int MIN_RECOVER_INTERVAL_SECONDS = 5; ++#endif ++ + } // namespace + + void Wasm::initializeLifecycle(Server::ServerLifecycleNotifier& lifecycle_notifier) { +@@ -81,8 +109,14 @@ + scope_(scope), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), + cluster_manager_(cluster_manager), dispatcher_(dispatcher), +- time_source_(dispatcher.timeSource()), lifecycle_stats_handler_(LifecycleStatsHandler( +- scope, config.config().vm_config().runtime())) { ++ time_source_(dispatcher.timeSource()), ++#ifdef ALIMESH ++ lifecycle_stats_handler_(LifecycleStatsHandler(scope, config.config().vm_config().runtime(), ++ config.config().name())) { ++#else ++ lifecycle_stats_handler_( ++ LifecycleStatsHandler(scope, config.config().vm_config().runtime()) { ++#endif + lifecycle_stats_handler_.onEvent(WasmEvent::VmCreated); + ENVOY_LOG(debug, "Base Wasm created {} now active", lifecycle_stats_handler_.getActiveVmCount()); + } +@@ -92,7 +126,7 @@ + [&base_wasm_handle]() { + return createWasmVm(absl::StrCat( + "envoy.wasm.runtime.", +- toAbslStringView(base_wasm_handle->wasm()->wasm_vm()->runtime()))); ++ toAbslStringView(base_wasm_handle->wasm()->wasm_vm()->getEngineName()))); + }), + scope_(getWasm(base_wasm_handle)->scope_), stat_name_pool_(scope_->symbolTable()), + custom_stat_namespace_(stat_name_pool_.add(CustomStatNamespace)), +@@ -100,6 +134,14 @@ + time_source_(dispatcher.timeSource()), + lifecycle_stats_handler_(getWasm(base_wasm_handle)->lifecycle_stats_handler_) { + lifecycle_stats_handler_.onEvent(WasmEvent::VmCreated); ++#ifdef ALIMESH ++ auto* vm = wasm_vm(); ++ if (vm) { ++ vm->addFailCallback([this](FailState fail_state) { ++ lifecycle_stats_handler_.onEvent(failStateToWasmEvent(fail_state)); ++ }); ++ } ++#endif + ENVOY_LOG(debug, "Thread-Local Wasm created {} now active", + lifecycle_stats_handler_.getActiveVmCount()); + } +@@ -150,6 +192,32 @@ + } + } + ++#if defined(ALIMESH) ++bool 
PluginHandleSharedPtrThreadLocal::recover() { ++ if (handle_ == nullptr || handle_->wasmHandle() == nullptr || ++ handle_->wasmHandle()->wasm() == nullptr) { ++ ENVOY_LOG(warn, "wasm has not been initialized"); ++ return false; ++ } ++ auto& dispatcher = handle_->wasmHandle()->wasm()->dispatcher(); ++ auto now = dispatcher.timeSource().monotonicTime() + cache_time_offset_for_testing; ++ if (now - last_recover_time_ < std::chrono::seconds(MIN_RECOVER_INTERVAL_SECONDS)) { ++ ENVOY_LOG(debug, "recover interval has not been reached"); ++ return false; ++ } ++ // Even if recovery fails, it will be retried after the interval ++ last_recover_time_ = now; ++ std::shared_ptr new_handle; ++ if (handle_->doRecover(new_handle)) { ++ handle_ = std::static_pointer_cast(new_handle); ++ handle_->wasmHandle()->wasm()->lifecycleStats().recover_total_.inc(); ++ ENVOY_LOG(info, "wasm vm recover from crash success"); ++ return true; ++ } ++ return false; ++} ++#endif ++ + // NOLINTNEXTLINE(readability-identifier-naming) + Word resolve_dns(Word dns_address_ptr, Word dns_address_size, Word token_ptr) { + auto context = static_cast(proxy_wasm::contextOrEffectiveContext()); +@@ -302,6 +370,10 @@ + return WasmEvent::ConfigureFailed; + case FailState::RuntimeError: + return WasmEvent::RuntimeError; ++#if defined(ALIMESH) ++ case FailState::RecoverError: ++ return WasmEvent::RecoverError; ++#endif + } + NOT_IMPLEMENTED_GCOVR_EXCL_LINE; + } +diff -Naur envoy/source/extensions/common/wasm/wasm.h envoy-new/source/extensions/common/wasm/wasm.h +--- envoy/source/extensions/common/wasm/wasm.h 2024-05-19 11:59:43.038438777 +0800 ++++ envoy-new/source/extensions/common/wasm/wasm.h 2024-05-19 11:59:31.674438554 +0800 +@@ -89,6 +89,10 @@ + } + void setFailStateForTesting(proxy_wasm::FailState fail_state) { failed_ = fail_state; } + ++#if defined(ALIMESH) ++ LifecycleStats& lifecycleStats() { return lifecycle_stats_handler_.stats(); } ++#endif ++ + protected: + friend class Context; + +@@ -151,13 +155,24 @@ + + using PluginHandleSharedPtr = std::shared_ptr; + ++#if defined(ALIMESH) ++class PluginHandleSharedPtrThreadLocal : public ThreadLocal::ThreadLocalObject, ++ public Logger::Loggable { ++public: ++ PluginHandleSharedPtrThreadLocal(PluginHandleSharedPtr handle) : handle_(handle){}; ++ bool recover(); ++#else + class PluginHandleSharedPtrThreadLocal : public ThreadLocal::ThreadLocalObject { + public: + PluginHandleSharedPtrThreadLocal(PluginHandleSharedPtr handle) : handle_(handle){}; ++#endif + PluginHandleSharedPtr& handle() { return handle_; } + + private: + PluginHandleSharedPtr handle_; ++#if defined(ALIMESH) ++ MonotonicTime last_recover_time_; ++#endif + }; + + using CreateWasmCallback = std::function; +diff -Naur envoy/source/extensions/filters/http/wasm/wasm_filter.h envoy-new/source/extensions/filters/http/wasm/wasm_filter.h +--- envoy/source/extensions/filters/http/wasm/wasm_filter.h 2024-05-19 11:59:43.150438779 +0800 ++++ envoy-new/source/extensions/filters/http/wasm/wasm_filter.h 2024-05-19 11:59:31.674438554 +0800 +@@ -31,22 +31,49 @@ + if (!tls_slot_->currentThreadRegistered()) { + return nullptr; + } +- PluginHandleSharedPtr handle = tls_slot_->get()->handle(); ++ auto opt_ref = tls_slot_->get(); ++ if (!opt_ref) { ++ return nullptr; ++ } ++ PluginHandleSharedPtr handle = opt_ref->handle(); + if (!handle) { + return nullptr; + } + if (handle->wasmHandle()) { + wasm = handle->wasmHandle()->wasm().get(); + } ++#if defined(ALIMESH) ++ auto failed = false; ++ if (!wasm) { ++ failed = true; ++ } else if 
(wasm->isFailed()) { ++ ENVOY_LOG(info, "wasm vm is crashed, try to recover"); ++ if (opt_ref->recover()) { ++ ENVOY_LOG(info, "wasm vm recover success"); ++ wasm = opt_ref->handle()->wasmHandle()->wasm().get(); ++ } else { ++ ENVOY_LOG(info, "wasm vm recover failed"); ++ failed = true; ++ } ++ } ++ if (failed) { ++ if (handle->plugin()->fail_open_) { ++ return nullptr; // Fail open skips adding this filter to callbacks. ++ } else { ++ return std::make_shared(nullptr, 0, ++ handle); // Fail closed is handled by an empty Context. ++ } ++ } ++#else + if (!wasm || wasm->isFailed()) { + if (handle->plugin()->fail_open_) { +- // Fail open skips adding this filter to callbacks. +- return nullptr; ++ return nullptr; // Fail open skips adding this filter to callbacks. + } else { +- // Fail closed is handled by an empty Context. +- return std::make_shared(nullptr, 0, handle); ++ return std::make_shared(nullptr, 0, ++ handle); // Fail closed is handled by an empty Context. + } + } ++#endif + return std::make_shared(wasm, handle->rootContextId(), handle); + } + +diff -Naur envoy/source/extensions/filters/network/common/redis/BUILD envoy-new/source/extensions/filters/network/common/redis/BUILD +--- envoy/source/extensions/filters/network/common/redis/BUILD 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/BUILD 2024-05-19 11:59:31.674438554 +0800 +@@ -51,6 +51,9 @@ + ":redis_command_stats_lib", + "//envoy/upstream:cluster_manager_interface", + ], ++ alimesh_deps = [ ++ "//envoy/upstream:upstream_interface", ++ ], + ) + + envoy_cc_library( +@@ -76,6 +79,41 @@ + ) + + envoy_cc_library( ++ name = "raw_client_interface", ++ hdrs = ["raw_client.h"], ++ visibility = [ ++ "//:contrib_library", ++ "//:extension_library", ++ "//envoy/redis:__pkg__", ++ ], ++ deps = [ ++ ":client_interface", ++ ":redis_command_stats_lib", ++ "//envoy/upstream:upstream_interface", ++ ], ++) ++ ++envoy_cc_library( ++ name = "raw_client_lib", ++ srcs = ["raw_client_impl.cc"], ++ hdrs = ["raw_client_impl.h"], ++ visibility = [ ++ "//:contrib_library", ++ "//:extension_library", ++ "//source/common/redis:__pkg__", ++ ], ++ deps = [ ++ ":raw_client_interface", ++ ":codec_lib", ++ ":utility_lib", ++ "//source/common/buffer:buffer_lib", ++ "//source/common/common:assert_lib", ++ "//source/common/network:filter_lib", ++ "//source/common/upstream:upstream_lib", ++ ] ++) ++ ++envoy_cc_library( + name = "utility_lib", + srcs = ["utility.cc"], + hdrs = ["utility.h"], +diff -Naur envoy/source/extensions/filters/network/common/redis/client.h envoy-new/source/extensions/filters/network/common/redis/client.h +--- envoy/source/extensions/filters/network/common/redis/client.h 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/client.h 2024-05-19 11:59:31.674438554 +0800 +@@ -7,6 +7,10 @@ + #include "source/extensions/filters/network/common/redis/codec_impl.h" + #include "source/extensions/filters/network/common/redis/redis_command_stats.h" + ++#if defined(ALIMESH) ++#include "envoy/redis/async_client.h" ++#endif ++ + namespace Envoy { + namespace Extensions { + namespace NetworkFilters { +@@ -14,6 +18,9 @@ + namespace Redis { + namespace Client { + ++#if defined(ALIMESH) ++using PoolRequest = Envoy::Redis::PoolRequest; ++#else + /** + * A handle to an outbound request. + */ +@@ -26,6 +33,7 @@ + */ + virtual void cancel() PURE; + }; ++#endif + + /** + * Outbound request callbacks. 
+diff -Naur envoy/source/extensions/filters/network/common/redis/codec.h envoy-new/source/extensions/filters/network/common/redis/codec.h +--- envoy/source/extensions/filters/network/common/redis/codec.h 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/codec.h 2024-05-19 11:59:31.674438554 +0800 +@@ -165,6 +165,15 @@ + virtual void onRespValue(RespValuePtr&& value) PURE; + }; + ++#if defined(ALIMESH) ++class RawDecoderCallbacks { ++public: ++ virtual ~RawDecoderCallbacks() = default; ++ ++ virtual void onRawResponse(std::string&& response) PURE; ++}; ++#endif ++ + /** + * A redis byte decoder for https://redis.io/topics/protocol + */ +@@ -195,6 +204,15 @@ + virtual DecoderPtr create(DecoderCallbacks& callbacks) PURE; + }; + ++#if defined(ALIMESH) ++class RawDecoderFactory { ++public: ++ virtual ~RawDecoderFactory() = default; ++ ++ virtual DecoderPtr create(RawDecoderCallbacks& callbacks) PURE; ++}; ++#endif ++ + /** + * A redis byte encoder for https://redis.io/topics/protocol + */ +@@ -212,6 +230,17 @@ + + using EncoderPtr = std::unique_ptr; + ++#if defined(ALIMESH) ++class RawEncoder { ++public: ++ virtual ~RawEncoder() = default; ++ ++ virtual void encode(absl::string_view value, Buffer::Instance& out) PURE; ++}; ++ ++using RawEncoderPtr = std::unique_ptr; ++#endif ++ + /** + * A redis protocol error. + */ +diff -Naur envoy/source/extensions/filters/network/common/redis/codec_impl.cc envoy-new/source/extensions/filters/network/common/redis/codec_impl.cc +--- envoy/source/extensions/filters/network/common/redis/codec_impl.cc 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/codec_impl.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -549,6 +549,238 @@ + } + } + ++#if defined(ALIMESH) ++void RawDecoderImpl::decode(Buffer::Instance& data) { ++ for (const Buffer::RawSlice& slice : data.getRawSlices()) { ++ parseSlice(slice); ++ } ++ ++ data.drain(data.length()); ++} ++ ++void RawDecoderImpl::parseSlice(const Buffer::RawSlice& slice) { ++ const char* buffer = reinterpret_cast(slice.mem_); ++ uint64_t remaining = slice.len_; ++ ++ while (remaining || state_ == State::ValueComplete) { ++ ENVOY_LOG(trace, "parse slice: {} remaining", remaining); ++ switch (state_) { ++ case State::ValueRootStart: { ++ ENVOY_LOG(trace, "parse slice ValueRootStart"); ++ ++ pending_value_root_.clear(); ++ pending_value_stack_.push_front({RespType::Null, "", 0, 0}); ++ state_ = State::ValueStart; ++ break; ++ } ++ case State::ValueStart: { ++ ENVOY_LOG(trace, "parse slice: ValueStart: {}", buffer[0]); ++ ++ pending_integer_.reset(); ++ switch (buffer[0]) { ++ case '*': { ++ state_ = State::IntegerStart; ++ pending_value_stack_.front().type = RespType::Array; ++ break; ++ } ++ case '$': { ++ state_ = State::IntegerStart; ++ pending_value_stack_.front().type = RespType::BulkString; ++ break; ++ } ++ case '-': { ++ state_ = State::SimpleString; ++ pending_value_stack_.front().type = RespType::Error; ++ break; ++ } ++ case '+': { ++ state_ = State::SimpleString; ++ pending_value_stack_.front().type = RespType::SimpleString; ++ break; ++ } ++ case ':': { ++ state_ = State::IntegerStart; ++ pending_value_stack_.front().type = RespType::Integer; ++ break; ++ } ++ default: { ++ throw ProtocolError("invalid value type"); ++ } ++ } ++ ++ pending_value_stack_.front().value.push_back(buffer[0]); ++ remaining--; ++ buffer++; ++ break; ++ } ++ ++ case State::IntegerStart: { ++ ENVOY_LOG(trace, "parse slice: IntegerStart: {}", buffer[0]); 
++
++      if (buffer[0] == '-') {
++        pending_integer_.negative_ = true;
++
++        pending_value_stack_.front().value.push_back(buffer[0]);
++        remaining--;
++        buffer++;
++      } else if (buffer[0] == '+') {
++        pending_value_stack_.front().value.push_back(buffer[0]);
++        remaining--;
++        buffer++;
++      }
++
++      state_ = State::Integer;
++      break;
++    }
++    case State::Integer: {
++      ENVOY_LOG(trace, "parse slice: Integer: {}", buffer[0]);
++
++      char c = buffer[0];
++      if (buffer[0] == '\r') {
++        state_ = State::IntegerLF;
++      } else {
++        if (c < '0' || c > '9') {
++          throw ProtocolError("invalid integer character");
++        } else {
++          pending_integer_.integer_ = (pending_integer_.integer_ * 10) + (c - '0');
++        }
++      }
++
++      pending_value_stack_.front().value.push_back(buffer[0]);
++      remaining--;
++      buffer++;
++      break;
++    }
++
++    case State::IntegerLF: {
++      ENVOY_LOG(trace, "parse slice: IntegerLF: {}", buffer[0]);
++
++      if (buffer[0] != '\n') {
++        throw ProtocolError("expect new line");
++      }
++
++      pending_value_stack_.front().value.push_back(buffer[0]);
++      remaining--;
++      buffer++;
++
++      PendingValue& current_value = pending_value_stack_.front();
++      if (current_value.type == RespType::Array) {
++        if (pending_integer_.negative_) {
++          current_value.type = RespType::Null;
++          state_ = State::ValueComplete;
++        } else if (pending_integer_.integer_ == 0) {
++          state_ = State::ValueComplete;
++        } else {
++          current_value.total_array_element = pending_integer_.integer_;
++          pending_value_stack_.push_front({RespType::Null, "", 0, 0});
++          state_ = State::ValueStart;
++        }
++      } else if (current_value.type == RespType::Integer) {
++        // The integer value itself is not needed, so it is not computed here.
++        state_ = State::ValueComplete;
++      } else {
++        ASSERT(current_value.type == RespType::BulkString);
++        if (!pending_integer_.negative_) {
++          state_ = State::BulkStringBody;
++        } else {
++          current_value.type = RespType::Null;
++          state_ = State::ValueComplete;
++        }
++      }
++      break;
++    }
++
++    case State::BulkStringBody: {
++      ENVOY_LOG(trace, "parse slice: BulkStringBody: {}", buffer[0]);
++
++      ASSERT(!pending_integer_.negative_);
++      uint64_t length_to_copy =
++          std::min(static_cast<uint64_t>(pending_integer_.integer_), remaining);
++      pending_value_stack_.front().value.append(buffer, length_to_copy);
++      pending_integer_.integer_ -= length_to_copy;
++      remaining -= length_to_copy;
++      buffer += length_to_copy;
++
++      if (pending_integer_.integer_ == 0) {
++        state_ = State::CR;
++      }
++      break;
++    }
++
++    case State::CR: {
++      ENVOY_LOG(trace, "parse slice: CR: {}", buffer[0]);
++
++      if (buffer[0] != '\r') {
++        throw ProtocolError("expected carriage return");
++      }
++      pending_value_stack_.front().value.push_back(buffer[0]);
++      remaining--;
++      buffer++;
++
++      state_ = State::LF;
++      break;
++    }
++
++    case State::LF: {
++      ENVOY_LOG(trace, "parse slice: LF: {}", buffer[0]);
++
++      if (buffer[0] != '\n') {
++        throw ProtocolError("expected new line");
++      }
++
++      pending_value_stack_.front().value.push_back(buffer[0]);
++      remaining--;
++      buffer++;
++
++      state_ = State::ValueComplete;
++      break;
++    }
++
++    case State::SimpleString: {
++      ENVOY_LOG(trace, "parse slice: SimpleString: {}", buffer[0]);
++
++      if (buffer[0] == '\r') {
++        state_ = State::LF;
++      }
++      pending_value_stack_.front().value.push_back(buffer[0]);
++      remaining--;
++      buffer++;
++      break;
++    }
++
++    case State::ValueComplete: {
++      ENVOY_LOG(trace, "parse slice: ValueComplete: {}", buffer[0]);
++      ASSERT(!pending_value_stack_.empty());
++
++      PendingValue current_value = pending_value_stack_.front();
++      pending_value_stack_.pop_front();
++ ++ if (pending_value_stack_.empty()) { ++ pending_value_root_.append(current_value.value); ++ ++ ENVOY_LOG(trace, "calling callbacks on value: {}", pending_value_root_); ++ callbacks_.onRawResponse(std::move(pending_value_root_)); ++ state_ = State::ValueRootStart; ++ } else { ++ PendingValue& array_value = pending_value_stack_.front(); ++ // only array type node can have children ++ ASSERT(array_value.type == RespType::Array); ++ ++ array_value.value.append(current_value.value); ++ ++ if (array_value.current_array_element < array_value.total_array_element - 1) { ++ array_value.current_array_element++; ++ pending_value_stack_.push_front({RespType::Null, "", 0, 0}); ++ state_ = State::ValueStart; ++ } ++ } ++ break; ++ } ++ } ++ } ++} ++#endif ++ + void EncoderImpl::encode(const RespValue& value, Buffer::Instance& out) { + switch (value.type()) { + case RespType::Array: { +@@ -651,6 +883,9 @@ + out.add(string); + out.add("\r\n", 2); + } ++#if defined(ALIMESH) ++void RawEncoderImpl::encode(absl::string_view value, Buffer::Instance& out) { out.add(value); } ++#endif + + } // namespace Redis + } // namespace Common +diff -Naur envoy/source/extensions/filters/network/common/redis/codec_impl.h envoy-new/source/extensions/filters/network/common/redis/codec_impl.h +--- envoy/source/extensions/filters/network/common/redis/codec_impl.h 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/codec_impl.h 2024-05-19 11:59:31.674438554 +0800 +@@ -64,6 +64,54 @@ + std::forward_list pending_value_stack_; + }; + ++#if defined(ALIMESH) ++class RawDecoderImpl : public Decoder, Logger::Loggable { ++public: ++ RawDecoderImpl(RawDecoderCallbacks& callbacks) : callbacks_(callbacks) {} ++ ++ // RedisProxy::Decoder ++ void decode(Buffer::Instance& data) override; ++ ++private: ++ enum class State { ++ ValueRootStart, ++ ValueStart, ++ IntegerStart, ++ Integer, ++ IntegerLF, ++ BulkStringBody, ++ CR, ++ LF, ++ SimpleString, ++ ValueComplete ++ }; ++ ++ struct PendingInteger { ++ void reset() { ++ integer_ = 0; ++ negative_ = false; ++ } ++ ++ uint64_t integer_; ++ bool negative_; ++ }; ++ ++ struct PendingValue { ++ RespType type; ++ std::string value; ++ uint64_t current_array_element; ++ uint64_t total_array_element; ++ }; ++ ++ void parseSlice(const Buffer::RawSlice& slice); ++ ++ RawDecoderCallbacks& callbacks_; ++ State state_{State::ValueRootStart}; ++ PendingInteger pending_integer_; ++ std::string pending_value_root_; ++ std::forward_list pending_value_stack_; ++}; ++#endif + /** + * A factory implementation that returns a real decoder. 
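// Illustrative sketch (not part of the patch): how the raw RESP decoder declared above is
// expected to be driven. Unlike DecoderImpl, RawDecoderImpl does not build a RespValue tree;
// it only tracks RESP framing and hands back the complete, still-encoded top-level value.
// `EchoCallbacks` and `rawDecodeExample` are hypothetical names used only for this sketch.
#include <iostream>
#include <string>

#include "source/common/buffer/buffer_impl.h"
#include "source/extensions/filters/network/common/redis/codec_impl.h"

namespace CommonRedis = Envoy::Extensions::NetworkFilters::Common::Redis;

struct EchoCallbacks : public CommonRedis::RawDecoderCallbacks {
  void onRawResponse(std::string&& response) override {
    // For the buffer below this prints the raw bytes "*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n".
    std::cout << response;
  }
};

void rawDecodeExample() {
  EchoCallbacks callbacks;
  CommonRedis::RawDecoderImpl decoder(callbacks);
  Envoy::Buffer::OwnedImpl data;
  data.add("*2\r\n$3\r\nfoo\r\n$3\r\nbar\r\n"); // two-element RESP array, kept verbatim
  decoder.decode(data);                         // drains `data` and fires onRawResponse once
}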
+ */ +@@ -74,7 +122,15 @@ + return DecoderPtr{new DecoderImpl(callbacks)}; + } + }; +- ++#if defined(ALIMESH) ++class RawDecoderFactoryImpl : public RawDecoderFactory { ++public: ++ // RedisProxy::RawDecoderFactory ++ DecoderPtr create(RawDecoderCallbacks& callbacks) override { ++ return DecoderPtr{new RawDecoderImpl(callbacks)}; ++ } ++}; ++#endif + /** + * Encoder implementation of https://redis.io/topics/protocol + */ +@@ -91,7 +147,13 @@ + void encodeInteger(int64_t integer, Buffer::Instance& out); + void encodeSimpleString(const std::string& string, Buffer::Instance& out); + }; +- ++#if defined(ALIMESH) ++class RawEncoderImpl : public RawEncoder { ++public: ++ // RedisProxy::RawEncoder ++ void encode(absl::string_view value, Buffer::Instance& out) override; ++}; ++#endif + } // namespace Redis + } // namespace Common + } // namespace NetworkFilters +diff -Naur envoy/source/extensions/filters/network/common/redis/raw_client.h envoy-new/source/extensions/filters/network/common/redis/raw_client.h +--- envoy/source/extensions/filters/network/common/redis/raw_client.h 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/raw_client.h 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,88 @@ ++#pragma once ++ ++#include ++ ++#include "envoy/upstream/upstream.h" ++ ++#include "source/extensions/filters/network/common/redis/client.h" ++#include "source/extensions/filters/network/common/redis/redis_command_stats.h" ++ ++namespace Envoy { ++namespace Extensions { ++namespace NetworkFilters { ++namespace Common { ++namespace Redis { ++namespace Client { ++ ++class RawClientCallbacks { ++public: ++ virtual ~RawClientCallbacks() = default; ++ ++ virtual void onResponse(std::string&& value) PURE; ++ ++ virtual void onFailure() PURE; ++}; ++ ++class DoNothingRawClientCallbacks : public RawClientCallbacks { ++public: ++ // RawClientCallbacks ++ void onFailure() override {} ++ void onResponse(std::string&&) override {} ++}; ++ ++class RawClient : public Event::DeferredDeletable { ++public: ++ ~RawClient() override = default; ++ ++ /** ++ * Adds network connection callbacks to the underlying network connection. ++ */ ++ virtual void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) PURE; ++ ++ /** ++ * Called to determine if the client has pending requests. ++ * @return bool true if the client is processing requests or false if it is currently idle. ++ */ ++ virtual bool active() PURE; ++ ++ /** ++ * Closes the underlying network connection. ++ */ ++ virtual void close() PURE; ++ ++ /** ++ * Make a pipelined request to the remote redis server. ++ * @param request supplies the RESP request to make. ++ * @param callbacks supplies the request callbacks. ++ * @return PoolRequest* a handle to the active request or nullptr if the request could not be made ++ * for some reason. ++ */ ++ virtual PoolRequest* makeRawRequest(absl::string_view request, ++ RawClientCallbacks& callbacks) PURE; ++ ++ /** ++ * Initialize the connection. Issue the auth command and readonly command as needed. ++ * @param auth password for upstream host. 
++ */ ++ virtual void initialize(const std::string& auth_username, const std::string& auth_password) PURE; ++}; ++ ++using RawClientPtr = std::unique_ptr; ++ ++class RawClientFactory { ++public: ++ virtual ~RawClientFactory() = default; ++ ++ virtual RawClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope, const std::string& auth_username, ++ const std::string& auth_password) PURE; ++}; ++ ++} // namespace Client ++} // namespace Redis ++} // namespace Common ++} // namespace NetworkFilters ++} // namespace Extensions ++} // namespace Envoy +diff -Naur envoy/source/extensions/filters/network/common/redis/raw_client_impl.cc envoy-new/source/extensions/filters/network/common/redis/raw_client_impl.cc +--- envoy/source/extensions/filters/network/common/redis/raw_client_impl.cc 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/raw_client_impl.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,247 @@ ++#include "source/extensions/filters/network/common/redis/raw_client_impl.h" ++ ++#include "source/common/upstream/upstream_impl.h" ++#include "source/extensions/filters/network/common/redis/utility.h" ++ ++namespace Envoy { ++namespace Extensions { ++namespace NetworkFilters { ++namespace Common { ++namespace Redis { ++namespace Client { ++namespace { ++Common::Redis::Client::DoNothingRawClientCallbacks null_raw_client_callbacks; ++} ++ ++RawClientPtr RawClientImpl::create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ RawEncoderPtr&& encoder, RawDecoderFactory& decoder_factory, ++ const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope) { ++ auto client = std::make_unique( ++ host, dispatcher, std::move(encoder), decoder_factory, config, redis_command_stats, scope); ++ client->connection_ = host->createConnection(dispatcher, nullptr, nullptr).connection_; ++ client->connection_->addConnectionCallbacks(*client); ++ client->connection_->addReadFilter(Network::ReadFilterSharedPtr{new UpstreamReadFilter(*client)}); ++ client->connection_->connect(); ++ client->connection_->noDelay(true); ++ return client; ++} ++ ++RawClientImpl::RawClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ RawEncoderPtr&& encoder, RawDecoderFactory& decoder_factory, ++ const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope) ++ : host_(host), encoder_(std::move(encoder)), decoder_(decoder_factory.create(*this)), ++ config_(config), ++ connect_or_op_timer_(dispatcher.createTimer([this]() { onConnectOrOpTimeout(); })), ++ flush_timer_(dispatcher.createTimer([this]() { flushBufferAndResetTimer(); })), ++ time_source_(dispatcher.timeSource()), redis_command_stats_(redis_command_stats), ++ scope_(scope) { ++ host->cluster().stats().upstream_cx_total_.inc(); ++ host->stats().cx_total_.inc(); ++ host->cluster().stats().upstream_cx_active_.inc(); ++ host->stats().cx_active_.inc(); ++ connect_or_op_timer_->enableTimer(host->cluster().connectTimeout()); ++} ++ ++RawClientImpl::~RawClientImpl() { ++ ASSERT(pending_requests_.empty()); ++ ASSERT(connection_->state() == Network::Connection::State::Closed); ++ host_->cluster().stats().upstream_cx_active_.dec(); ++ host_->stats().cx_active_.dec(); ++} ++ ++void RawClientImpl::close() { connection_->close(Network::ConnectionCloseType::NoFlush); } ++ ++void 
RawClientImpl::flushBufferAndResetTimer() { ++ if (flush_timer_->enabled()) { ++ flush_timer_->disableTimer(); ++ } ++ connection_->write(encoder_buffer_, false); ++} ++ ++PoolRequest* RawClientImpl::makeRawRequest(absl::string_view request, ++ RawClientCallbacks& callbacks) { ++ ASSERT(connection_->state() == Network::Connection::State::Open); ++ ++ const bool empty_buffer = encoder_buffer_.length() == 0; ++ ++ Stats::StatName command = redis_command_stats_->getUnusedStatName(); ++ ++ pending_requests_.emplace_back(*this, callbacks, command); ++ encoder_->encode(request, encoder_buffer_); ++ ++ // If buffer is full, flush. If the buffer was empty before the request, start the timer. ++ if (encoder_buffer_.length() >= config_.maxBufferSizeBeforeFlush()) { ++ flushBufferAndResetTimer(); ++ } else if (empty_buffer) { ++ flush_timer_->enableTimer(std::chrono::milliseconds(config_.bufferFlushTimeoutInMs())); ++ } ++ ++ // Only boost the op timeout if: ++ // - We are not already connected. Otherwise, we are governed by the connect timeout and the timer ++ // will be reset when/if connection occurs. This allows a relatively long connection spin up ++ // time for example if TLS is being used. ++ // - This is the first request on the pipeline. Otherwise the timeout would effectively start on ++ // the last operation. ++ if (connected_ && pending_requests_.size() == 1) { ++ connect_or_op_timer_->enableTimer(config_.opTimeout()); ++ } ++ ++ return &pending_requests_.back(); ++} ++ ++void RawClientImpl::onConnectOrOpTimeout() { ++ putOutlierEvent(Upstream::Outlier::Result::LocalOriginTimeout); ++ if (connected_) { ++ host_->cluster().stats().upstream_rq_timeout_.inc(); ++ host_->stats().rq_timeout_.inc(); ++ } else { ++ host_->cluster().stats().upstream_cx_connect_timeout_.inc(); ++ host_->stats().cx_connect_fail_.inc(); ++ } ++ ++ connection_->close(Network::ConnectionCloseType::NoFlush); ++} ++ ++void RawClientImpl::onData(Buffer::Instance& data) { ++ try { ++ decoder_->decode(data); ++ } catch (ProtocolError&) { ++ putOutlierEvent(Upstream::Outlier::Result::ExtOriginRequestFailed); ++ host_->cluster().stats().upstream_cx_protocol_error_.inc(); ++ host_->stats().rq_error_.inc(); ++ connection_->close(Network::ConnectionCloseType::NoFlush); ++ } ++} ++ ++void RawClientImpl::putOutlierEvent(Upstream::Outlier::Result result) { ++ if (!config_.disableOutlierEvents()) { ++ host_->outlierDetector().putResult(result); ++ } ++} ++ ++void RawClientImpl::onEvent(Network::ConnectionEvent event) { ++ if (event == Network::ConnectionEvent::RemoteClose || ++ event == Network::ConnectionEvent::LocalClose) { ++ ++ Upstream::reportUpstreamCxDestroy(host_, event); ++ if (!pending_requests_.empty()) { ++ Upstream::reportUpstreamCxDestroyActiveRequest(host_, event); ++ if (event == Network::ConnectionEvent::RemoteClose) { ++ putOutlierEvent(Upstream::Outlier::Result::LocalOriginConnectFailed); ++ } ++ } ++ ++ while (!pending_requests_.empty()) { ++ PendingRequest& request = pending_requests_.front(); ++ if (!request.canceled_) { ++ request.callbacks_.onFailure(); ++ } else { ++ host_->cluster().stats().upstream_rq_cancelled_.inc(); ++ } ++ pending_requests_.pop_front(); ++ } ++ ++ connect_or_op_timer_->disableTimer(); ++ } else if (event == Network::ConnectionEvent::Connected) { ++ connected_ = true; ++ ASSERT(!pending_requests_.empty()); ++ connect_or_op_timer_->enableTimer(config_.opTimeout()); ++ } ++ ++ if (event == Network::ConnectionEvent::RemoteClose && !connected_) { ++ 
host_->cluster().stats().upstream_cx_connect_fail_.inc(); ++ host_->stats().cx_connect_fail_.inc(); ++ } ++} ++ ++void RawClientImpl::onRawResponse(std::string&& response) { ++ ASSERT(!pending_requests_.empty()); ++ PendingRequest& request = pending_requests_.front(); ++ const bool canceled = request.canceled_; ++ ++ request.aggregate_request_timer_->complete(); ++ ++ RawClientCallbacks& callbacks = request.callbacks_; ++ ++ // We need to ensure the request is popped before calling the callback, since the callback might ++ // result in closing the connection. ++ pending_requests_.pop_front(); ++ if (canceled) { ++ host_->cluster().stats().upstream_rq_cancelled_.inc(); ++ } else { ++ // do not handle redirection here ++ callbacks.onResponse(std::move(response)); ++ } ++ ++ // If there are no remaining ops in the pipeline we need to disable the timer. ++ // Otherwise we boost the timer since we are receiving responses and there are more to flush ++ // out. ++ if (pending_requests_.empty()) { ++ connect_or_op_timer_->disableTimer(); ++ } else { ++ connect_or_op_timer_->enableTimer(config_.opTimeout()); ++ } ++ ++ putOutlierEvent(Upstream::Outlier::Result::ExtOriginRequestSuccess); ++} ++ ++RawClientImpl::PendingRequest::PendingRequest(RawClientImpl& parent, RawClientCallbacks& callbacks, ++ Stats::StatName command) ++ : parent_(parent), callbacks_(callbacks), command_{command}, ++ aggregate_request_timer_(parent_.redis_command_stats_->createAggregateTimer( ++ parent_.scope_, parent_.time_source_)) { ++ parent.host_->cluster().stats().upstream_rq_total_.inc(); ++ parent.host_->stats().rq_total_.inc(); ++ parent.host_->cluster().stats().upstream_rq_active_.inc(); ++ parent.host_->stats().rq_active_.inc(); ++} ++ ++RawClientImpl::PendingRequest::~PendingRequest() { ++ parent_.host_->cluster().stats().upstream_rq_active_.dec(); ++ parent_.host_->stats().rq_active_.dec(); ++} ++ ++void RawClientImpl::PendingRequest::cancel() { ++ // If we get a cancellation, we just mark the pending request as cancelled, and then we drop ++ // the response as it comes through. There is no reason to blow away the connection when the ++ // remote is already responding as fast as possible. 
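// Illustrative sketch (not part of the patch): one way the raw client added in
// raw_client_impl.{h,cc} above could be driven by code that already owns an upstream host,
// a dispatcher, a client Config and Redis command stats. `LoggingCallbacks` and `sendPing`
// are hypothetical names; responses come back through RawClientCallbacks as raw RESP bytes.
#include "source/common/common/logger.h"
#include "source/extensions/filters/network/common/redis/raw_client_impl.h"

namespace CommonRedis = Envoy::Extensions::NetworkFilters::Common::Redis;

struct LoggingCallbacks : public CommonRedis::Client::RawClientCallbacks {
  void onResponse(std::string&& response) override {
    ENVOY_LOG_MISC(info, "redis raw response: {}", response);
  }
  void onFailure() override { ENVOY_LOG_MISC(warn, "redis raw request failed"); }
};

CommonRedis::Client::RawClientPtr
sendPing(Envoy::Upstream::HostConstSharedPtr host, Envoy::Event::Dispatcher& dispatcher,
         const CommonRedis::Client::Config& config,
         const CommonRedis::RedisCommandStatsSharedPtr& command_stats, Envoy::Stats::Scope& scope,
         LoggingCallbacks& callbacks) {
  // create() also runs initialize(), which issues AUTH when credentials are configured and,
  // depending on the read policy, READONLY, before any caller request is flushed.
  auto client = CommonRedis::Client::RawClientFactoryImpl::instance_.create(
      host, dispatcher, config, command_stats, scope, /*auth_username=*/"", /*auth_password=*/"");
  // Requests are already-encoded RESP byte strings; the returned PoolRequest* may be cancel()ed.
  client->makeRawRequest("*1\r\n$4\r\nPING\r\n", callbacks);
  return client; // The caller must keep the client alive until onResponse()/onFailure() fires.
}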
++ canceled_ = true; ++} ++ ++void RawClientImpl::initialize(const std::string& auth_username, const std::string& auth_password) { ++ if (!auth_username.empty()) { ++ std::string auth_request = Utility::makeRawAuthRequest(auth_username, auth_password); ++ makeRawRequest(auth_request, null_raw_client_callbacks); ++ } else if (!auth_password.empty()) { ++ std::string auth_request = Utility::makeRawAuthRequest(auth_password); ++ makeRawRequest(auth_request, null_raw_client_callbacks); ++ } ++ ++ if (config_.readPolicy() != Common::Redis::Client::ReadPolicy::Primary) { ++ makeRawRequest(Utility::makeRawReadOnlyRequest(), null_raw_client_callbacks); ++ } ++} ++ ++RawClientFactoryImpl RawClientFactoryImpl::instance_; ++ ++RawClientPtr RawClientFactoryImpl::create(Upstream::HostConstSharedPtr host, ++ Event::Dispatcher& dispatcher, const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope, const std::string& auth_username, ++ const std::string& auth_password) { ++ RawClientPtr client = RawClientImpl::create(host, dispatcher, RawEncoderPtr{new RawEncoderImpl()}, ++ decoder_factory_, config, redis_command_stats, scope); ++ client->initialize(auth_username, auth_password); ++ return client; ++} ++ ++} // namespace Client ++} // namespace Redis ++} // namespace Common ++} // namespace NetworkFilters ++} // namespace Extensions ++} // namespace Envoy +diff -Naur envoy/source/extensions/filters/network/common/redis/raw_client_impl.h envoy-new/source/extensions/filters/network/common/redis/raw_client_impl.h +--- envoy/source/extensions/filters/network/common/redis/raw_client_impl.h 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/raw_client_impl.h 2024-05-19 11:59:31.674438554 +0800 +@@ -0,0 +1,114 @@ ++#pragma once ++ ++#include "source/common/buffer/buffer_impl.h" ++#include "source/common/network/filter_impl.h" ++#include "source/extensions/filters/network/common/redis/raw_client.h" ++ ++namespace Envoy { ++namespace Extensions { ++namespace NetworkFilters { ++namespace Common { ++namespace Redis { ++namespace Client { ++ ++class RawClientImpl : public RawClient, ++ public RawDecoderCallbacks, ++ public Network::ConnectionCallbacks { ++public: ++ static RawClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ RawEncoderPtr&& encoder, RawDecoderFactory& decoder_factory, ++ const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope); ++ ++ RawClientImpl(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ RawEncoderPtr&& encoder, RawDecoderFactory& decoder_factory, const Config& config, ++ const RedisCommandStatsSharedPtr& redis_command_stats, Stats::Scope& scope); ++ ~RawClientImpl() override; ++ ++ // RawClient ++ void addConnectionCallbacks(Network::ConnectionCallbacks& callbacks) override { ++ connection_->addConnectionCallbacks(callbacks); ++ } ++ void close() override; ++ PoolRequest* makeRawRequest(absl::string_view request, RawClientCallbacks& callbacks) override; ++ bool active() override { return !pending_requests_.empty(); } ++ void flushBufferAndResetTimer(); ++ void initialize(const std::string& auth_username, const std::string& auth_password) override; ++ ++private: ++ friend class RedisRawClientImplTest; ++ ++ struct UpstreamReadFilter : public Network::ReadFilterBaseImpl { ++ UpstreamReadFilter(RawClientImpl& parent) : parent_(parent) {} ++ ++ // Network::ReadFilter ++ Network::FilterStatus 
onData(Buffer::Instance& data, bool) override { ++ parent_.onData(data); ++ return Network::FilterStatus::Continue; ++ } ++ ++ RawClientImpl& parent_; ++ }; ++ ++ struct PendingRequest : public PoolRequest { ++ PendingRequest(RawClientImpl& parent, RawClientCallbacks& callbacks, Stats::StatName stat_name); ++ ~PendingRequest() override; ++ ++ // PoolRequest ++ void cancel() override; ++ ++ RawClientImpl& parent_; ++ RawClientCallbacks& callbacks_; ++ Stats::StatName command_; ++ bool canceled_{}; ++ Stats::TimespanPtr aggregate_request_timer_; ++ Stats::TimespanPtr command_request_timer_; ++ }; ++ ++ void onConnectOrOpTimeout(); ++ void onData(Buffer::Instance& data); ++ void putOutlierEvent(Upstream::Outlier::Result result); ++ ++ // RawDecoderCallbacks ++ void onRawResponse(std::string&& response) override; ++ ++ // Network::ConnectionCallbacks ++ void onEvent(Network::ConnectionEvent event) override; ++ void onAboveWriteBufferHighWatermark() override {} ++ void onBelowWriteBufferLowWatermark() override {} ++ ++ Upstream::HostConstSharedPtr host_; ++ Network::ClientConnectionPtr connection_; ++ RawEncoderPtr encoder_; ++ Buffer::OwnedImpl encoder_buffer_; ++ DecoderPtr decoder_; ++ const Config& config_; ++ std::list pending_requests_; ++ Event::TimerPtr connect_or_op_timer_; ++ bool connected_{}; ++ Event::TimerPtr flush_timer_; ++ Envoy::TimeSource& time_source_; ++ const RedisCommandStatsSharedPtr redis_command_stats_; ++ Stats::Scope& scope_; ++}; ++ ++class RawClientFactoryImpl : public RawClientFactory { ++public: ++ RawClientPtr create(Upstream::HostConstSharedPtr host, Event::Dispatcher& dispatcher, ++ const Config& config, const RedisCommandStatsSharedPtr& redis_command_stats, ++ Stats::Scope& scope, const std::string& auth_username, ++ const std::string& auth_password) override; ++ ++ static RawClientFactoryImpl instance_; ++ ++private: ++ RawDecoderFactoryImpl decoder_factory_; ++}; ++ ++} // namespace Client ++} // namespace Redis ++} // namespace Common ++} // namespace NetworkFilters ++} // namespace Extensions ++} // namespace Envoy +diff -Naur envoy/source/extensions/filters/network/common/redis/utility.cc envoy-new/source/extensions/filters/network/common/redis/utility.cc +--- envoy/source/extensions/filters/network/common/redis/utility.cc 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/utility.cc 2024-05-19 11:59:31.674438554 +0800 +@@ -37,7 +37,38 @@ + response->asString() = error; + return response; + } ++#if defined(ALIMESH) ++std::string makeRawError(const std::string& error) { ++ std::string result; ++ result.append(fmt::format("-{}\r\n", error)); ++ return result; ++} ++ ++std::string makeRawRequest(const std::string& command, std::vector params) { ++ std::string result; ++ result.append(fmt::format("*{}\r\n", 1 + params.size())); ++ result.append(fmt::format("${}\r\n{}\r\n", command.size(), command)); ++ for (auto& param : params) { ++ result.append(fmt::format("${}\r\n{}\r\n", param.size(), param)); ++ } ++ return result; ++} ++ ++std::string makeRawAuthRequest(const std::string& username, const std::string& password) { ++ return makeRawRequest("AUTH", {username, password}); ++} ++ ++std::string makeRawAuthRequest(const std::string& password) { ++ return makeRawRequest("AUTH", {password}); ++} + ++absl::string_view makeRawReadOnlyRequest() { ++ const std::string readonly{"readonly"}; ++ static const std::string readonly_request = ++ fmt::format("${}\r\n{}\r\n", readonly.size(), readonly); ++ return 
readonly_request; ++} ++#endif + ReadOnlyRequest::ReadOnlyRequest() { + std::vector values(1); + values[0].type(RespType::BulkString); +diff -Naur envoy/source/extensions/filters/network/common/redis/utility.h envoy-new/source/extensions/filters/network/common/redis/utility.h +--- envoy/source/extensions/filters/network/common/redis/utility.h 2024-05-19 11:59:43.042438777 +0800 ++++ envoy-new/source/extensions/filters/network/common/redis/utility.h 2024-05-19 11:59:31.674438554 +0800 +@@ -18,6 +18,12 @@ + }; + + RespValuePtr makeError(const std::string& error); ++#if defined(ALIMESH) ++std::string makeRawError(const std::string& error); ++std::string makeRawAuthRequest(const std::string& password); ++std::string makeRawAuthRequest(const std::string& username, const std::string& password); ++absl::string_view makeRawReadOnlyRequest(); ++#endif + + class ReadOnlyRequest : public Redis::RespValue { + public: +diff -Naur envoy/source/extensions/filters/network/wasm/wasm_filter.h envoy-new/source/extensions/filters/network/wasm/wasm_filter.h +--- envoy/source/extensions/filters/network/wasm/wasm_filter.h 2024-05-19 11:59:43.046438777 +0800 ++++ envoy-new/source/extensions/filters/network/wasm/wasm_filter.h 2024-05-19 11:59:31.674438554 +0800 +@@ -28,19 +28,52 @@ + + std::shared_ptr createFilter() { + Wasm* wasm = nullptr; +- PluginHandleSharedPtr handle = tls_slot_->get()->handle(); ++ if (!tls_slot_->currentThreadRegistered()) { ++ return nullptr; ++ } ++ auto opt_ref = tls_slot_->get(); ++ if (!opt_ref) { ++ return nullptr; ++ } ++ PluginHandleSharedPtr handle = opt_ref->handle(); ++ if (!handle) { ++ return nullptr; ++ } + if (handle->wasmHandle()) { + wasm = handle->wasmHandle()->wasm().get(); + } ++#if defined(ALIMESH) ++ auto failed = false; ++ if (!wasm) { ++ failed = true; ++ } else if (wasm->isFailed()) { ++ ENVOY_LOG(info, "wasm vm is crashed, try to recover"); ++ if (opt_ref->recover()) { ++ ENVOY_LOG(info, "wasm vm recover success"); ++ wasm = opt_ref->handle()->wasmHandle()->wasm().get(); ++ } else { ++ ENVOY_LOG(info, "wasm vm recover failed"); ++ failed = true; ++ } ++ } ++ if (failed) { ++ if (handle->plugin()->fail_open_) { ++ return nullptr; // Fail open skips adding this filter to callbacks. ++ } else { ++ return std::make_shared(nullptr, 0, ++ handle); // Fail closed is handled by an empty Context. ++ } ++ } ++#else + if (!wasm || wasm->isFailed()) { + if (handle->plugin()->fail_open_) { +- // Fail open skips adding this filter to callbacks. +- return nullptr; ++ return nullptr; // Fail open skips adding this filter to callbacks. + } else { +- // Fail closed is handled by an empty Context. +- return std::make_shared(nullptr, 0, handle); ++ return std::make_shared(nullptr, 0, ++ handle); // Fail closed is handled by an empty Context. 
+ } + } ++#endif + return std::make_shared(wasm, handle->rootContextId(), handle); + } + +diff -Naur envoy/test/extensions/access_loggers/wasm/test_data/BUILD envoy-new/test/extensions/access_loggers/wasm/test_data/BUILD +--- envoy/test/extensions/access_loggers/wasm/test_data/BUILD 2024-05-19 11:59:43.078438778 +0800 ++++ envoy-new/test/extensions/access_loggers/wasm/test_data/BUILD 2024-05-19 11:59:31.674438554 +0800 +@@ -28,7 +28,4 @@ + envoy_wasm_cc_binary( + name = "test_cpp.wasm", + srcs = ["test_cpp.cc"], +- deps = [ +- "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite", +- ], + ) +diff -Naur envoy/test/extensions/bootstrap/wasm/test_data/BUILD envoy-new/test/extensions/bootstrap/wasm/test_data/BUILD +--- envoy/test/extensions/bootstrap/wasm/test_data/BUILD 2024-05-19 11:59:43.078438778 +0800 ++++ envoy-new/test/extensions/bootstrap/wasm/test_data/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -108,9 +108,7 @@ + envoy_wasm_cc_binary( + name = "speed_cpp.wasm", + srcs = ["speed_cpp.cc"], +- deps = [ +- "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_full", +- ], ++ protobuf = "full", + ) + + envoy_wasm_cc_binary( +diff -Naur envoy/test/extensions/bootstrap/wasm/test_data/logging_rust.rs envoy-new/test/extensions/bootstrap/wasm/test_data/logging_rust.rs +--- envoy/test/extensions/bootstrap/wasm/test_data/logging_rust.rs 2024-05-19 11:59:43.078438778 +0800 ++++ envoy-new/test/extensions/bootstrap/wasm/test_data/logging_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -2,18 +2,10 @@ + use proxy_wasm::traits::{Context, RootContext}; + use proxy_wasm::types::LogLevel; + +-extern "C" { +- fn __wasilibc_initialize_environ(); +-} +- +-#[no_mangle] +-pub fn _start() { +- unsafe { +- __wasilibc_initialize_environ(); +- } ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); +-} ++}} + + struct TestRoot; + +diff -Naur envoy/test/extensions/common/wasm/wasm_vm_test.cc envoy-new/test/extensions/common/wasm/wasm_vm_test.cc +--- envoy/test/extensions/common/wasm/wasm_vm_test.cc 2024-05-19 11:59:43.082438778 +0800 ++++ envoy-new/test/extensions/common/wasm/wasm_vm_test.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -56,11 +56,11 @@ + TEST_F(BaseVmTest, NullVmStartup) { + auto wasm_vm = createWasmVm("envoy.wasm.runtime.null"); + EXPECT_TRUE(wasm_vm != nullptr); +- EXPECT_TRUE(wasm_vm->runtime() == "null"); ++ EXPECT_TRUE(wasm_vm->getEngineName() == "null"); + EXPECT_TRUE(wasm_vm->cloneable() == Cloneable::InstantiatedModule); + auto wasm_vm_clone = wasm_vm->clone(); + EXPECT_TRUE(wasm_vm_clone != nullptr); +- EXPECT_EQ(wasm_vm->runtime(), "null"); ++ EXPECT_EQ(wasm_vm->getEngineName(), "null"); + std::function f; + EXPECT_FALSE(wasm_vm->integration()->getNullVmFunction("bad_function", false, 0, nullptr, &f)); + } +@@ -184,7 +184,7 @@ + + TEST_P(WasmVmTest, V8Load) { + ASSERT_TRUE(init()); +- EXPECT_TRUE(wasm_vm_->runtime() == "v8"); ++ EXPECT_TRUE(wasm_vm_->getEngineName() == "v8"); + EXPECT_TRUE(wasm_vm_->cloneable() == Cloneable::CompiledBytecode); + EXPECT_TRUE(wasm_vm_->clone() != nullptr); + } +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/async_call_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/async_call_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/async_call_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/async_call_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -3,11 +3,10 @@ + use proxy_wasm::types::*; + use 
std::time::Duration; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +-} ++}} + + struct TestStream; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/body_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/body_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/body_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/body_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -2,8 +2,7 @@ + use proxy_wasm::traits::{Context, HttpContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { + Box::new(TestStream { +@@ -11,7 +10,7 @@ + body_chunks: 0, + }) + }); +-} ++}} + + struct TestStream { + test: Option, +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/BUILD envoy-new/test/extensions/filters/http/wasm/test_data/BUILD +--- envoy/test/extensions/filters/http/wasm/test_data/BUILD 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -123,6 +123,7 @@ + "test_cpp_null_plugin.cc", + "test_grpc_call_cpp.cc", + "test_grpc_stream_cpp.cc", ++ "test_redis_call_cpp.cc", + "test_resume_call_cpp.cc", + "test_shared_data_cpp.cc", + "test_shared_queue_cpp.cc", +@@ -149,16 +150,17 @@ + "test_cpp.cc", + "test_grpc_call_cpp.cc", + "test_grpc_stream_cpp.cc", ++ "test_redis_call_cpp.cc", + "test_panic_cpp.cc", + "test_resume_call_cpp.cc", + "test_shared_data_cpp.cc", + "test_shared_queue_cpp.cc", + ], ++ protobuf = "lite", + deps = [ + ":test_cc_proto", + "//source/extensions/common/wasm/ext:declare_property_cc_proto", + "//source/extensions/common/wasm/ext:envoy_proxy_wasm_api_lib", +- "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite", + "@proxy_wasm_cpp_sdk//contrib:contrib_lib", + ], + ) +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/close_stream_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/close_stream_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/close_stream_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/close_stream_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -1,10 +1,9 @@ + use proxy_wasm::traits::{Context, HttpContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +-} ++}} + + struct TestStream; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/grpc_call_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/grpc_call_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/grpc_call_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/grpc_call_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -11,12 +11,11 @@ + static CALLOUT_ID: Cell> = Cell::new(None); + } + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! 
{{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestGrpcCallRoot) }); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestGrpcCall) }); +-} ++}} + + struct TestGrpcCallRoot; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/grpc_stream_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/grpc_stream_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/grpc_stream_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/grpc_stream_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -4,11 +4,10 @@ + use proxy_wasm::traits::{Context, HttpContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestGrpcStream) }); +-} ++}} + + struct TestGrpcStream; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/headers_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/headers_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/headers_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/headers_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -2,20 +2,12 @@ + use proxy_wasm::traits::{Context, HttpContext}; + use proxy_wasm::types::*; + +-extern "C" { +- fn __wasilibc_initialize_environ(); +-} +- +-#[no_mangle] +-pub fn _start() { +- unsafe { +- __wasilibc_initialize_environ(); +- } ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|context_id, _| -> Box { + Box::new(TestStream { context_id }) + }); +-} ++}} + + struct TestStream { + context_id: u32, +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/metadata_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/metadata_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/metadata_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/metadata_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -3,12 +3,11 @@ + use proxy_wasm::types::*; + use std::convert::TryFrom; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +-} ++}} + + struct TestRoot; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/panic_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/panic_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/panic_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/panic_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -1,10 +1,9 @@ + use proxy_wasm::traits::{Context, HttpContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! 
{{ + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +-} ++}} + + struct TestStream; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/resume_call_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/resume_call_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/resume_call_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/resume_call_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -3,11 +3,10 @@ + use proxy_wasm::types::*; + use std::time::Duration; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_http_context(|_, _| -> Box { Box::new(TestStream) }); +-} ++}} + + struct TestStream; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/shared_data_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/shared_data_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/shared_data_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/shared_data_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -2,11 +2,10 @@ + use proxy_wasm::traits::{Context, RootContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! {{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { Box::new(TestRoot) }); +-} ++}} + + struct TestRoot; + +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/shared_queue_rust.rs envoy-new/test/extensions/filters/http/wasm/test_data/shared_queue_rust.rs +--- envoy/test/extensions/filters/http/wasm/test_data/shared_queue_rust.rs 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/shared_queue_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -2,13 +2,12 @@ + use proxy_wasm::traits::{Context, HttpContext, RootContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! 
{{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_root_context(|_| -> Box { + Box::new(TestRoot { queue_id: None }) + }); +-} ++}} + + struct TestRoot { + queue_id: Option, +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/test_cpp.cc envoy-new/test/extensions/filters/http/wasm/test_data/test_cpp.cc +--- envoy/test/extensions/filters/http/wasm/test_data/test_cpp.cc 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/test_cpp.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -269,7 +269,39 @@ + } + + return FilterHeadersStatus::Continue; ++ } else if (test == "CrashRecover") { ++ if (!getRequestHeader("crash")->toString().empty()) { ++ abort(); ++ } ++ } else if (test == "DisableClearRouteCache") { ++ setFilterState("clear_route_cache", "off"); ++ logDebug(std::string("onRequestHeaders ") + std::to_string(id()) + std::string(" ") + test); ++ auto path = getRequestHeader(":path"); ++ logInfo(std::string("header path ") + std::string(path->view())); ++ std::string protocol; ++ addRequestHeader("newheader", "newheadervalue"); ++ auto server = getRequestHeader("server"); ++ replaceRequestHeader("server", "envoy-wasm"); ++ auto r = addResponseHeader("bad", "bad"); ++ if (r != WasmResult::BadArgument) { ++ logWarn("unexpected success of addResponseHeader"); ++ } ++ if (addResponseTrailer("bad", "bad") != WasmResult::BadArgument) { ++ logWarn("unexpected success of addResponseTrailer"); ++ } ++ if (removeResponseTrailer("bad") != WasmResult::BadArgument) { ++ logWarn("unexpected success of remoteResponseTrailer"); ++ } ++ size_t size; ++ if (getRequestHeaderSize(&size) != WasmResult::Ok) { ++ logWarn("unexpected failure of getRequestHeaderMapSize"); ++ } ++ if (getResponseHeaderSize(&size) != WasmResult::BadArgument) { ++ logWarn("unexpected success of getResponseHeaderMapSize"); ++ } ++ return FilterHeadersStatus::Continue; + } ++ + return FilterHeadersStatus::Continue; + } + +@@ -294,7 +326,12 @@ + auto test = root()->test_; + if (test == "headers") { + CHECK_RESULT(addResponseHeader("test-status", "OK")); ++ } else if (test == "CrashRecover") { ++ if (!getResponseHeader("crash")->toString().empty()) { ++ abort(); ++ } + } ++ + return FilterHeadersStatus::Continue; + } + +@@ -335,17 +372,28 @@ + } + logTrace(std::string("Struct ") + request_string + " " + request_string2); + return FilterDataStatus::Continue; ++ } else if (test == "CrashRecover") { ++ auto body = getBufferBytes(WasmBufferType::HttpRequestBody, 0, body_buffer_length); ++ if (!body->toString().empty()) { ++ abort(); ++ } + } + return FilterDataStatus::Continue; + } + +-FilterDataStatus TestContext::onResponseBody(size_t, bool end_of_stream) { ++FilterDataStatus TestContext::onResponseBody(size_t body_buffer_length, bool end_of_stream) { + auto test = root()->test_; + if (test == "headers") { + if (end_of_stream) { + CHECK_RESULT(addResponseTrailer("newtrailer", "response")); + } ++ } else if (test == "CrashRecover") { ++ auto body = getBufferBytes(WasmBufferType::HttpResponseBody, 0, body_buffer_length); ++ if (!body->toString().empty()) { ++ abort(); ++ } + } ++ + return FilterDataStatus::Continue; + } + +@@ -385,7 +433,8 @@ + logWarn("response.code: " + std::to_string(responseCode)); + } + std::string upstream_host_metadata; +- if (getValue({"upstream_host_metadata", "filter_metadata", "namespace", "key"}, &upstream_host_metadata)) { ++ if (getValue({"upstream_host_metadata", "filter_metadata", "namespace", "key"}, ++ &upstream_host_metadata)) { + logWarn("upstream 
host metadata: " + upstream_host_metadata); + } + logWarn("state: " + getProperty({"wasm_state"}).value()->toString()); +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc envoy-new/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc +--- envoy/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/test_grpc_call_cpp.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -15,14 +15,23 @@ + public: + MyGrpcCallHandler() : GrpcCallHandler() {} + void onSuccess(size_t body_size) override { ++ if (call_done_) { ++ proxy_done(); ++ return; ++ } + auto response = getBufferBytes(WasmBufferType::GrpcReceiveBuffer, 0, body_size); + logDebug(response->proto().string_value()); + cancel(); + } + void onFailure(GrpcStatus) override { ++ if (call_done_) { ++ proxy_done(); ++ return; ++ } + auto p = getStatus(); + logDebug(std::string("failure ") + std::string(p.second->view())); + } ++ bool call_done_{false}; + }; + + class GrpcCallRootContext : public RootContext { +@@ -32,12 +41,20 @@ + void onQueueReady(uint32_t op) override { + if (op == 0) { + handler_->cancel(); +- } else { ++ } else if (op == 1) { + grpcClose(handler_->token()); ++ } else if (op == 2) { ++ on_done_ = false; ++ handler_->call_done_ = true; + } + } + ++ bool onDone() override { ++ return on_done_; ++ } ++ + MyGrpcCallHandler* handler_ = nullptr; ++ bool on_done_{true}; + }; + + class GrpcCallContextProto : public Context { +diff -Naur envoy/test/extensions/filters/http/wasm/test_data/test_redis_call_cpp.cc envoy-new/test/extensions/filters/http/wasm/test_data/test_redis_call_cpp.cc +--- envoy/test/extensions/filters/http/wasm/test_data/test_redis_call_cpp.cc 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/test_data/test_redis_call_cpp.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -0,0 +1,62 @@ ++// NOLINT(namespace-envoy) ++#include ++#include ++#include ++ ++#ifndef NULL_PLUGIN ++#include "proxy_wasm_intrinsics_lite.h" ++#else ++#include "source/extensions/common/wasm/ext/envoy_null_plugin.h" ++#endif ++ ++START_WASM_PLUGIN(HttpWasmTestCpp) ++ ++class RedisCallContext : public Context { ++public: ++ explicit RedisCallContext(uint32_t id, RootContext* root) : Context(id, root) {} ++ ++ FilterHeadersStatus onRequestHeaders(uint32_t, bool) override; ++}; ++ ++class RedisCallRootContext : public RootContext { ++public: ++ explicit RedisCallRootContext(uint32_t id, std::string_view root_id) : RootContext(id, root_id) {} ++}; ++ ++static RegisterContextFactory register_RedisCallContext(CONTEXT_FACTORY(RedisCallContext), ++ ROOT_FACTORY(RedisCallRootContext), ++ "redis_call"); ++ ++FilterHeadersStatus RedisCallContext::onRequestHeaders(uint32_t, bool) { ++ auto context_id = id(); ++ auto callback = [context_id](RedisStatus, size_t body_size) { ++ if (body_size == 0) { ++ logInfo("redis_call failed"); ++ return; ++ } ++ ++ getContext(context_id)->setEffectiveContext(); ++ logWarn(std::string("bodysize: 5")); ++ auto response = getBufferBytes(WasmBufferType::RedisCallResponse, 0, body_size); ++ logDebug(std::string(response->view())); ++ }; ++ ++ // set id 1 ++ auto query = "*3\r\n$3\r\nset\r\n$2\r\nid\r\n$1\r\n1\r\n"; ++ auto path = getRequestHeader(":path"); ++ if (path->view() == "/bad") { ++ if (root()->redisCall("cluster", query, callback) != WasmResult::Ok) { ++ logInfo("redis_call rejected"); ++ } ++ } else { ++ if (root()->redisCall("bogus cluster", query, 
callback) == WasmResult::Ok) { ++ logError("bogus cluster found error"); ++ } ++ root()->redisCall("cluster", query, callback); ++ logInfo("onRequestHeaders"); ++ } ++ ++ return FilterHeadersStatus::StopIteration; ++} ++ ++END_WASM_PLUGIN +diff -Naur envoy/test/extensions/filters/http/wasm/wasm_filter_test.cc envoy-new/test/extensions/filters/http/wasm/wasm_filter_test.cc +--- envoy/test/extensions/filters/http/wasm/wasm_filter_test.cc 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/http/wasm/wasm_filter_test.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -312,6 +312,105 @@ + EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm-continue")); + filter().onDestroy(); + } ++TEST_P(WasmHttpFilterTest, RecoverFromCrash) { ++ auto runtime = std::get<0>(GetParam()); ++ if (runtime == "null") { ++ return; ++ } ++ if (std::get<1>(GetParam()) != "cpp") { ++ return; ++ } ++ setupTest("", "CrashRecover"); ++ setupFilter(); ++ EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); ++ auto& crash_total = scope_->counterFromString("wasm.envoy.wasm.runtime." + runtime + ++ ".plugin.plugin_name.crash_total"); ++ auto& crash_vm = ++ scope_->gaugeFromString("wasm.envoy.wasm.runtime." + runtime + ".plugin.plugin_name.crash", ++ Stats::Gauge::ImportMode::NeverImport); ++ auto& recover_total = scope_->counterFromString("wasm.envoy.wasm.runtime." + runtime + ++ ".plugin.plugin_name.recover_total"); ++ auto& recover_error = scope_->counterFromString("wasm.envoy.wasm.runtime." + runtime + ++ ".plugin.plugin_name.recover_error"); ++ Http::MockStreamDecoderFilterCallbacks decoder_callbacks; ++ filter().setDecoderFilterCallbacks(decoder_callbacks); ++ ++ EXPECT_EQ(0U, crash_total.value()); ++ EXPECT_EQ(0U, crash_vm.value()); ++ EXPECT_EQ(0U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ auto fail_headers = Http::TestResponseHeaderMapImpl{{":status", "503"}}; ++ EXPECT_CALL(decoder_callbacks, encodeHeaders_(HeaderMapEqualRef(&fail_headers), true)); ++ EXPECT_CALL(decoder_callbacks, ++ sendLocalReply(Envoy::Http::Code::ServiceUnavailable, testing::Eq(""), _, ++ testing::Eq(Grpc::Status::WellKnownGrpcStatus::Unavailable), ++ testing::Eq("wasm_fail_stream"))); ++ Http::TestRequestHeaderMapImpl request_headers{{"crash", "true"}}; ++ EXPECT_NE(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); ++ EXPECT_EQ(1U, crash_total.value()); ++ EXPECT_EQ(1U, crash_vm.value()); ++ EXPECT_EQ(0U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ doRecover(); ++ filter().onCreate(); ++ request_headers = {}; ++ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); ++ EXPECT_EQ(1U, crash_total.value()); ++ EXPECT_EQ(0U, crash_vm.value()); ++ EXPECT_EQ(1U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ Http::TestResponseHeaderMapImpl response_headers{{"crash", "true"}}; ++ EXPECT_NE(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false)); ++ EXPECT_EQ(2U, crash_total.value()); ++ EXPECT_EQ(1U, crash_vm.value()); ++ EXPECT_EQ(1U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ doRecover(); ++ filter().onCreate(); ++ response_headers = {}; ++ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().encodeHeaders(response_headers, false)); ++ EXPECT_EQ(2U, crash_total.value()); ++ EXPECT_EQ(0U, crash_vm.value()); ++ EXPECT_EQ(2U, recover_total.value()); ++ EXPECT_EQ(0U, 
recover_error.value()); ++ ++ Buffer::OwnedImpl invalid_data("crash"); ++ Buffer::OwnedImpl normal_data(""); ++ ++ EXPECT_NE(Http::FilterDataStatus::Continue, filter().decodeData(invalid_data, false)); ++ EXPECT_EQ(3U, crash_total.value()); ++ EXPECT_EQ(1U, crash_vm.value()); ++ EXPECT_EQ(2U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ doRecover(); ++ filter().onCreate(); ++ EXPECT_EQ(Http::FilterDataStatus::Continue, filter().decodeData(normal_data, false)); ++ EXPECT_EQ(3U, crash_total.value()); ++ EXPECT_EQ(0U, crash_vm.value()); ++ EXPECT_EQ(3U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ EXPECT_NE(Http::FilterDataStatus::Continue, filter().encodeData(invalid_data, false)); ++ EXPECT_EQ(4U, crash_total.value()); ++ EXPECT_EQ(1U, crash_vm.value()); ++ EXPECT_EQ(3U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ doRecover(); ++ filter().onCreate(); ++ EXPECT_EQ(Http::FilterDataStatus::Continue, filter().encodeData(normal_data, false)); ++ EXPECT_EQ(4U, crash_total.value()); ++ EXPECT_EQ(0U, crash_vm.value()); ++ EXPECT_EQ(4U, recover_total.value()); ++ EXPECT_EQ(0U, recover_error.value()); ++ ++ filter().onDestroy(); ++} + #endif + + TEST_P(WasmHttpFilterTest, HeadersStopAndBuffer) { +@@ -708,7 +807,74 @@ + filter().log(&request_headers, &response_headers, &response_trailers, log_stream_info); + filter().onDestroy(); + } ++#if defined(ALIMESH) ++TEST_P(WasmHttpFilterTest, RedisCall) { ++ if (std::get<1>(GetParam()) == "rust") { ++ // This feature is not supported in rust test code ++ return; ++ } ++ ++ setupTest("redis_call"); ++ setupFilter(); ++ ++ Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}}; ++ std::string redis_query{"*3\r\n$3\r\nset\r\n$2\r\nid\r\n$1\r\n1\r\n"}; ++ Redis::MockRedisPoolRequest redis_request( ++ &cluster_manager_.thread_local_cluster_.redis_async_client_, std::string(redis_query)); ++ Redis::AsyncClient::Callbacks* callbacks = nullptr; ++ cluster_manager_.initializeThreadLocalClusters({"cluster"}); ++ ++ EXPECT_CALL(cluster_manager_.thread_local_cluster_, redisAsyncClient()); ++ EXPECT_CALL(cluster_manager_.thread_local_cluster_.redis_async_client_, send_(_, _)) ++ .WillOnce( ++ Invoke([&](std::string& query, Redis::AsyncClient::Callbacks& cb) -> Redis::PoolRequest* { ++ EXPECT_EQ(redis_query, query); ++ callbacks = &cb; ++ return &redis_request; ++ })); + ++ EXPECT_CALL(filter(), log_(spdlog::level::debug, Eq("+OK\r\n"))); ++ EXPECT_CALL(filter(), log_(spdlog::level::warn, Eq("bodysize: 5"))); ++ EXPECT_CALL(filter(), log_(spdlog::level::info, Eq("onRequestHeaders"))) ++ .WillOnce(Invoke([&](uint32_t, absl::string_view) -> proxy_wasm::WasmResult { ++ std::string response{"+OK\r\n"}; ++ callbacks->onSuccess(redis_request.request_, std::move(response)); ++ return proxy_wasm::WasmResult::Ok; ++ })); ++ EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, ++ filter().decodeHeaders(request_headers, false)); ++ ++ EXPECT_NE(callbacks, nullptr); ++} ++ ++TEST_P(WasmHttpFilterTest, DisableClearRouteCache) { ++ if (std::get<1>(GetParam()) == "rust") { ++ // This feature is not supported in rust test code ++ return; ++ } ++ ++ setupTest("", "DisableClearRouteCache"); ++ setupFilter(); ++ EXPECT_CALL(encoder_callbacks_, streamInfo()).WillRepeatedly(ReturnRef(request_stream_info_)); ++ EXPECT_CALL(filter(), log_(spdlog::level::debug, ++ Eq(absl::string_view("onRequestHeaders 2 DisableClearRouteCache")))); ++ EXPECT_CALL(filter(), log_(spdlog::level::info, 
Eq(absl::string_view("header path /")))); ++ ++ // Verify that route cache is cleared when modifying HTTP request headers. ++ Http::MockStreamDecoderFilterCallbacks decoder_callbacks; ++ filter().setDecoderFilterCallbacks(decoder_callbacks); ++ EXPECT_CALL(decoder_callbacks, clearRouteCache()).Times(0); ++ ++ Http::TestRequestHeaderMapImpl request_headers{{":path", "/"}, {"server", "envoy"}}; ++ EXPECT_EQ(Http::FilterHeadersStatus::Continue, filter().decodeHeaders(request_headers, false)); ++ EXPECT_THAT(request_headers.get_("newheader"), Eq("newheadervalue")); ++ EXPECT_THAT(request_headers.get_("server"), Eq("envoy-wasm")); ++ Http::TestRequestTrailerMapImpl request_trailers{}; ++ EXPECT_EQ(Http::FilterTrailersStatus::Continue, filter().decodeTrailers(request_trailers)); ++ Http::MetadataMap request_metadata{}; ++ EXPECT_EQ(Http::FilterMetadataStatus::Continue, filter().decodeMetadata(request_metadata)); ++} ++#endif + TEST_P(WasmHttpFilterTest, AsyncCall) { + setupTest("async_call"); + setupFilter(); +@@ -1275,13 +1441,13 @@ + } + + TEST_P(WasmHttpFilterTest, GrpcCallAfterDestroyed) { +- std::vector proto_or_cluster; +- proto_or_cluster.push_back("grpc_call"); ++ std::vector> proto_or_cluster; ++ proto_or_cluster.emplace_back("grpc_call", true); + if (std::get<1>(GetParam()) == "cpp") { + // cluster definition passed as a protobuf is only supported in C++ SDK. +- proto_or_cluster.push_back("grpc_call_proto"); ++ proto_or_cluster.emplace_back("grpc_call_proto", false); + } +- for (const auto& id : proto_or_cluster) { ++ for (const auto& [id, success] : proto_or_cluster) { + TestScopedRuntime scoped_runtime; + setupTest(id); + setupFilter(); +@@ -1332,23 +1498,41 @@ + EXPECT_EQ(Http::FilterHeadersStatus::StopAllIterationAndWatermark, + filter().decodeHeaders(request_headers, false)); + +- EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; }); ++ if (std::get<1>(GetParam()) == "rust") { ++ EXPECT_CALL(request, cancel()).WillOnce([&]() { callbacks = nullptr; }); + +- // Destroy the Context, Plugin and VM. +- context_.reset(); +- plugin_.reset(); +- plugin_handle_.reset(); +- wasm_.reset(); ++ // Destroy the Context, Plugin and VM. ++ context_.reset(); ++ plugin_.reset(); ++ plugin_handle_.reset(); ++ wasm_.reset(); ++ } else { ++ rootContext().onQueueReady(2); ++ ++ // Start shutdown sequence. 
++ wasm_->wasm()->startShutdown(); ++ plugin_.reset(); ++ plugin_handle_.reset(); ++ wasm_.reset(); ++ } + + ProtobufWkt::Value value; + value.set_string_value("response"); + std::string response_string; + EXPECT_TRUE(value.SerializeToString(&response_string)); + auto response = std::make_unique(response_string); +- EXPECT_EQ(callbacks, nullptr); ++ if (std::get<1>(GetParam()) == "rust") { ++ EXPECT_EQ(callbacks, nullptr); ++ } else { ++ EXPECT_NE(callbacks, nullptr); ++ } + NiceMock span; + if (callbacks) { +- callbacks->onSuccessRaw(std::move(response), span); ++ if (success) { ++ callbacks->onSuccessRaw(std::move(response), span); ++ } else { ++ callbacks->onFailure(Grpc::Status::WellKnownGrpcStatus::Canceled, "bad", span); ++ } + } + } + } +diff -Naur envoy/test/extensions/filters/network/common/redis/BUILD envoy-new/test/extensions/filters/network/common/redis/BUILD +--- envoy/test/extensions/filters/network/common/redis/BUILD 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/network/common/redis/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -20,6 +20,9 @@ + "//source/extensions/filters/network/common/redis:codec_lib", + "//test/test_common:printers_lib", + ], ++ alimesh_deps = [ ++ "//source/extensions/filters/network/common/redis:raw_client_lib", ++ ], + ) + + envoy_cc_test_library( +@@ -60,6 +63,9 @@ + "//test/test_common:simulated_time_system_lib", + "@envoy_api//envoy/extensions/filters/network/redis_proxy/v3:pkg_cc_proto", + ], ++ alimesh_deps = [ ++ "//source/extensions/filters/network/common/redis:raw_client_lib", ++ ], + ) + + envoy_cc_test( +diff -Naur envoy/test/extensions/filters/network/common/redis/client_impl_test.cc envoy-new/test/extensions/filters/network/common/redis/client_impl_test.cc +--- envoy/test/extensions/filters/network/common/redis/client_impl_test.cc 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/network/common/redis/client_impl_test.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -6,6 +6,7 @@ + #include "source/common/network/utility.h" + #include "source/common/upstream/upstream_impl.h" + #include "source/extensions/filters/network/common/redis/client_impl.h" ++#include "source/extensions/filters/network/common/redis/raw_client_impl.h" + #include "source/extensions/filters/network/common/redis/utility.h" + + #include "test/extensions/filters/network/common/redis/mocks.h" +@@ -1216,6 +1217,617 @@ + auth_username, auth_password); + client->close(); + } ++#if defined(ALIMESH) ++class RedisRawClientDefaultConfig : public Config { ++ std::chrono::milliseconds opTimeout() const override { return std::chrono::milliseconds(20); } ++ // Cluster is not supported ++ bool enableHashtagging() const override { return false; } ++ bool enableRedirection() const override { return false; } ++ bool disableOutlierEvents() const override { return false; } ++ // Default value, same to ClientTest(ConfigImpl Default value) ++ unsigned int maxBufferSizeBeforeFlush() const override { return 0; } ++ std::chrono::milliseconds bufferFlushTimeoutInMs() const override { ++ return std::chrono::milliseconds(3); ++ } ++ ReadPolicy readPolicy() const override { return ReadPolicy::Primary; } ++ uint32_t maxUpstreamUnknownConnections() const override { return 100; } ++ // RawClient do not support command stats ++ bool enableCommandStats() const override { return false; } ++}; ++ ++class RedisRawClientImplTest : public testing::Test, ++ public Event::TestUsingSimulatedTime, ++ public Common::Redis::RawDecoderFactory { ++public: ++ // 
Common::Redis::RawDecoderFactory ++ DecoderPtr create(Common::Redis::RawDecoderCallbacks& callbacks) override { ++ callbacks_ = &callbacks; ++ return Common::Redis::DecoderPtr{decoder_}; ++ } ++ ++ ~RedisRawClientImplTest() override { ++ client_.reset(); ++ ++ EXPECT_TRUE(TestUtility::gaugesZeroed(host_->cluster_.stats_store_.gauges())); ++ EXPECT_TRUE(TestUtility::gaugesZeroed(host_->stats_.gauges())); ++ } ++ ++ void setup() { ++ config_ = std::make_unique(); ++ finishSetup(); ++ } ++ ++ void setup(std::unique_ptr&& config) { ++ config_ = std::move(config); ++ finishSetup(); ++ } ++ ++ void finishSetup() { ++ upstream_connection_ = new NiceMock(); ++ Upstream::MockHost::MockCreateConnectionData conn_info; ++ conn_info.connection_ = upstream_connection_; ++ ++ // Create timers in order they are created in client_impl.cc ++ connect_or_op_timer_ = new Event::MockTimer(&dispatcher_); ++ flush_timer_ = new Event::MockTimer(&dispatcher_); ++ ++ EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _)); ++ EXPECT_CALL(*host_, createConnection_(_, _)).WillOnce(Return(conn_info)); ++ EXPECT_CALL(*upstream_connection_, addReadFilter(_)) ++ .WillOnce(SaveArg<0>(&upstream_read_filter_)); ++ EXPECT_CALL(*upstream_connection_, connect()); ++ EXPECT_CALL(*upstream_connection_, noDelay(true)); ++ ++ redis_command_stats_ = ++ Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable()); ++ ++ client_ = RawClientImpl::create(host_, dispatcher_, Common::Redis::RawEncoderPtr{encoder_}, ++ *this, *config_, redis_command_stats_, stats_); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_total_.value()); ++ EXPECT_EQ(1UL, host_->stats_.cx_total_.value()); ++ EXPECT_EQ(false, client_->active()); ++ ++ // NOP currently. ++ upstream_connection_->runHighWatermarkCallbacks(); ++ upstream_connection_->runLowWatermarkCallbacks(); ++ } ++ ++ void onConnected() { ++ EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _)); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::Connected); ++ } ++ ++ void respond() { ++ std::string response1{"+OK"}; ++ EXPECT_EQ(true, client_->active()); ++ RawClientImpl* client_impl = dynamic_cast(client_.get()); ++ EXPECT_NE(client_impl, nullptr); ++ client_impl->onRawResponse(std::move(response1)); ++ } ++ ++ void testInitializeReadPolicy( ++ envoy::extensions::filters::network::redis_proxy::v3::RedisProxy::ConnPoolSettings::ReadPolicy ++ read_policy) { ++ InSequence s; ++ ++ setup(std::make_unique(createConnPoolSettings(20, true, true, 100, read_policy))); ++ ++ absl::string_view raw_readonly_request = Utility::makeRawReadOnlyRequest(); ++ EXPECT_CALL(*encoder_, encode(raw_readonly_request, _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ client_->initialize(auth_username_, auth_password_); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); ++ ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ client_->close(); ++ } ++ ++ const std::string cluster_name_{"foo"}; ++ std::shared_ptr host_{new NiceMock()}; ++ Event::MockDispatcher dispatcher_; ++ Event::MockTimer* flush_timer_{}; ++ Event::MockTimer* connect_or_op_timer_{}; ++ MockRawEncoder* encoder_{new MockRawEncoder()}; ++ MockDecoder* decoder_{new MockDecoder()}; ++ Common::Redis::RawDecoderCallbacks* 
callbacks_{}; ++ NiceMock* upstream_connection_{}; ++ Network::ReadFilterSharedPtr upstream_read_filter_; ++ std::unique_ptr config_; ++ RawClientPtr client_; ++ NiceMock stats_; ++ Stats::ScopeSharedPtr stats_scope_; ++ Common::Redis::RedisCommandStatsSharedPtr redis_command_stats_; ++ std::string auth_username_; ++ std::string auth_password_; ++}; ++ ++TEST_F(RedisRawClientImplTest, Basic) { ++ InSequence s; ++ ++ setup(); ++ ++ client_->initialize(auth_username_, auth_password_); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(request1, _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ std::string request2; ++ MockRawClientCallbacks callbacks2; ++ EXPECT_CALL(*encoder_, encode(request2, _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle2 = client_->makeRawRequest(request2, callbacks2); ++ EXPECT_NE(nullptr, handle2); ++ ++ EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ EXPECT_EQ(2UL, host_->stats_.rq_total_.value()); ++ EXPECT_EQ(2UL, host_->stats_.rq_active_.value()); ++ ++ Buffer::OwnedImpl fake_data; ++ EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { ++ InSequence s; ++ std::string response1; ++ EXPECT_CALL(callbacks1, onResponse_(response1)); ++ EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _)); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ callbacks_->onRawResponse(std::move(response1)); ++ ++ std::string response2; ++ EXPECT_CALL(callbacks2, onResponse_(response2)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ callbacks_->onRawResponse(std::move(response2)); ++ })); ++ upstream_read_filter_->onData(fake_data, false); ++ ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ client_->close(); ++} ++ ++TEST(RedisRawClientFactoryImplTest, Basic) { ++ RawClientFactoryImpl factory; ++ Upstream::MockHost::MockCreateConnectionData conn_info; ++ conn_info.connection_ = new NiceMock(); ++ std::shared_ptr host(new NiceMock()); ++ ++ EXPECT_CALL(*host, createConnection_(_, _)).WillOnce(Return(conn_info)); ++ NiceMock dispatcher; ++ ConfigImpl config(createConnPoolSettings()); ++ Stats::IsolatedStoreImpl stats_; ++ auto redis_command_stats = ++ Common::Redis::RedisCommandStats::createRedisCommandStats(stats_.symbolTable()); ++ const std::string auth_username; ++ const std::string auth_password; ++ RawClientPtr client = factory.create(host, dispatcher, config, redis_command_stats, stats_, ++ auth_username, auth_password); ++ client->close(); ++} ++ ++std::string initializeRawCommand(const std::string& command, ++ const std::vector& params) { ++ std::string result; ++ size_t n = params.size() + 1; ++ result.append(fmt::format("*{}\r\n", n)); ++ result.append(fmt::format("${}\r\n{}\r\n", command.size(), command)); ++ for (auto item : params) { ++ result.append(fmt::format("${}\r\n{}\r\n", item.size(), item)); ++ } ++ return result; ++} ++ ++TEST_F(RedisRawClientImplTest, CommandStatsDisableRequest) { ++ InSequence s; ++ ++ setup(); ++ 
++ client_->initialize(auth_username_, auth_password_); ++ ++ std::string request1_str = initializeRawCommand("get", {"foo"}); ++ absl::string_view request1{request1_str}; ++ MockRawClientCallbacks callbacks1; ++ ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); ++ ++ Buffer::OwnedImpl fake_data; ++ EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { ++ InSequence s; ++ ++ simTime().setMonotonicTime(std::chrono::microseconds(10)); ++ ++ EXPECT_CALL(stats_, ++ deliverHistogramToSinks( ++ Property(&Stats::Metric::name, "upstream_commands.upstream_rq_time"), 10)); ++ ++ std::string response1{"+OK\r\n"}; ++ EXPECT_CALL(callbacks1, onResponse_(Eq(response1))); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ ++ callbacks_->onRawResponse(std::move(response1)); ++ })); ++ ++ upstream_read_filter_->onData(fake_data, false); ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ client_->close(); ++ ++ // The redis command stats should not show any requests ++ EXPECT_EQ(0UL, stats_.counter("upstream_commands.get.success").value()); ++ EXPECT_EQ(0UL, stats_.counter("upstream_commands.get.failure").value()); ++ EXPECT_EQ(0UL, stats_.counter("upstream_commands.get.total").value()); ++} ++ ++TEST_F(RedisRawClientImplTest, InitializedWithAuthPassword) { ++ InSequence s; ++ ++ setup(); ++ ++ auth_password_ = "testing password"; ++ std::string auth_request = initializeRawCommand("AUTH", {auth_password_}); ++ EXPECT_CALL(*encoder_, encode(Eq(auth_request), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ client_->initialize(auth_username_, auth_password_); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); ++ ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ client_->close(); ++} ++ ++TEST_F(RedisRawClientImplTest, InitializedWithAuthAcl) { ++ InSequence s; ++ ++ setup(); ++ ++ auth_username_ = "testing username"; ++ auth_password_ = "testing password"; ++ std::string auth_request = initializeRawCommand("AUTH", {auth_username_, auth_password_}); ++ EXPECT_CALL(*encoder_, encode(Eq(auth_request), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ client_->initialize(auth_username_, auth_password_); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_total_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_active_.value()); ++ ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ 
client_->close(); ++} ++ ++TEST_F(RedisRawClientImplTest, Cancel) { ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ std::string request2; ++ MockRawClientCallbacks callbacks2; ++ EXPECT_CALL(*encoder_, encode(Eq(request2), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle2 = client_->makeRawRequest(request2, callbacks2); ++ EXPECT_NE(nullptr, handle2); ++ ++ handle1->cancel(); ++ ++ Buffer::OwnedImpl fake_data; ++ EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { ++ InSequence s; ++ ++ std::string response1{"$-1\r\n"}; ++ EXPECT_CALL(callbacks1, onResponse_(_)).Times(0); ++ EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _)); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ callbacks_->onRawResponse(std::move(response1)); ++ ++ std::string response2{"*-1\r\n"}; ++ EXPECT_CALL(callbacks2, onResponse_(Eq(response2))); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ callbacks_->onRawResponse(std::move(response2)); ++ })); ++ upstream_read_filter_->onData(fake_data, false); ++ ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ client_->close(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, FailAll) { ++ InSequence s; ++ ++ setup(); ++ ++ NiceMock connection_callbacks; ++ client_->addConnectionCallbacks(connection_callbacks); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_remote_with_active_rq_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, FailAllWithCancel) { ++ InSequence s; ++ ++ setup(); ++ ++ NiceMock connection_callbacks; ++ client_->addConnectionCallbacks(connection_callbacks); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ handle1->cancel(); ++ ++ EXPECT_CALL(callbacks1, onFailure()).Times(0); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(connection_callbacks, 
onEvent(Network::ConnectionEvent::LocalClose)); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::LocalClose); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_with_active_rq_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_destroy_local_with_active_rq_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_cancelled_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, ProtocolError) { ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ Buffer::OwnedImpl fake_data; ++ EXPECT_CALL(*decoder_, decode(Ref(fake_data))).WillOnce(Invoke([&](Buffer::Instance&) -> void { ++ throw Common::Redis::ProtocolError("error"); ++ })); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestFailed, _)); ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ upstream_read_filter_->onData(fake_data, false); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_protocol_error_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_error_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, ConnectFail) { ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); ++ EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, OutlierDisabled) { ++ InSequence s; ++ ++ setup(std::make_unique()); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ EXPECT_CALL(host_->outlier_detector_, putResult(_, _)).Times(0); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_fail_.value()); ++ EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, ConnectTimeout) { ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); ++ EXPECT_CALL(*upstream_connection_, 
close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ connect_or_op_timer_->invokeCallback(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_cx_connect_timeout_.value()); ++ EXPECT_EQ(1UL, host_->stats_.cx_connect_fail_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, OpTimeout) { ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ ++ EXPECT_CALL(callbacks1, onResponse_(_)); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ respond(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++ ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ EXPECT_CALL(*connect_or_op_timer_, enableTimer(_, _)); ++ handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::LocalOriginTimeout, _)); ++ EXPECT_CALL(*upstream_connection_, close(Network::ConnectionCloseType::NoFlush)); ++ EXPECT_CALL(callbacks1, onFailure()); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ connect_or_op_timer_->invokeCallback(); ++ ++ EXPECT_EQ(1UL, host_->cluster_.stats_.upstream_rq_timeout_.value()); ++ EXPECT_EQ(1UL, host_->stats_.rq_timeout_.value()); ++ EXPECT_EQ(2UL, host_->cluster_.stats_.upstream_rq_total_.value()); ++ EXPECT_EQ(0UL, host_->cluster_.stats_.upstream_rq_active_.value()); ++} ++ ++TEST_F(RedisRawClientImplTest, RemoveFailedHealthCheck) { ++ // This test simulates a health check response signaling traffic should be drained from the host. ++ // As a result, the health checker will close the client in the call back. ++ InSequence s; ++ ++ setup(); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ std::string response1{"$-1\r\n"}; ++ // Each call should result in either onResponse or onFailure, never both. ++ EXPECT_CALL(callbacks1, onFailure()).Times(0); ++ EXPECT_CALL(callbacks1, onResponse_(Eq(response1))).WillOnce(Invoke([&](std::string&) { ++ // The health checker might fail the active health check based on the response content, and ++ // result in removing the host and closing the client. ++ client_->close(); ++ })); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()).Times(2); ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::ExtOriginRequestSuccess, _)); ++ callbacks_->onRawResponse(std::move(response1)); ++} ++ ++TEST_F(RedisRawClientImplTest, RemoveFailedHost) { ++ // This test simulates a health check request failed due to remote host closing the connection. 
++ // As a result the health checker will close the client in the call back. ++ InSequence s; ++ ++ setup(); ++ ++ NiceMock connection_callbacks; ++ client_->addConnectionCallbacks(connection_callbacks); ++ ++ std::string request1; ++ MockRawClientCallbacks callbacks1; ++ EXPECT_CALL(*encoder_, encode(Eq(request1), _)); ++ EXPECT_CALL(*flush_timer_, enabled()).WillOnce(Return(false)); ++ PoolRequest* handle1 = client_->makeRawRequest(request1, callbacks1); ++ EXPECT_NE(nullptr, handle1); ++ ++ onConnected(); ++ ++ EXPECT_CALL(host_->outlier_detector_, ++ putResult(Upstream::Outlier::Result::LocalOriginConnectFailed, _)); ++ EXPECT_CALL(callbacks1, onFailure()).WillOnce(Invoke([&]() { client_->close(); })); ++ EXPECT_CALL(*connect_or_op_timer_, disableTimer()); ++ EXPECT_CALL(connection_callbacks, onEvent(Network::ConnectionEvent::RemoteClose)); ++ upstream_connection_->raiseEvent(Network::ConnectionEvent::RemoteClose); ++} ++#endif + } // namespace Client + } // namespace Redis + } // namespace Common +diff -Naur envoy/test/extensions/filters/network/common/redis/codec_impl_test.cc envoy-new/test/extensions/filters/network/common/redis/codec_impl_test.cc +--- envoy/test/extensions/filters/network/common/redis/codec_impl_test.cc 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/network/common/redis/codec_impl_test.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -426,7 +426,176 @@ + buffer_.add("$1\r\na\ra"); + EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); + } ++#if defined(ALIMESH) ++class RedisRawEncoderDecoderImplTest : public testing::Test, RawDecoderCallbacks { ++public: ++ RedisRawEncoderDecoderImplTest() : decoder_(*this) {} + ++ void onRawResponse(std::string&& response) override { decoded_values_.push_back(response); } ++ ++ RawEncoderImpl encoder_; ++ RawDecoderImpl decoder_; ++ Buffer::OwnedImpl buffer_; ++ std::vector decoded_values_; ++}; ++ ++TEST_F(RedisRawEncoderDecoderImplTest, Null) { ++ std::string query{"$-1\r\n"}; ++ encoder_.encode(query, buffer_); ++ // encoder output should be same to input ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ // decoder output should be same to input ++ EXPECT_EQ(query, decoded_values_[0]); ++ // decoder should consume all character ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, Error) { ++ std::string query{"-Error\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, SimpleString) { ++ std::string query{"+simple string\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, BulkString) { ++ std::string query{"$11\r\nbulk string\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, Integer) { ++ std::string query{":9223372036854775807\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, NegativeIntegerSmall) { ++ 
std::string query{":-1\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, NegativeIntegerLarge) { ++ std::string query{":-9223372036854775808\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, EmptyArray) { ++ std::string query{"*0\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, Array) { ++ std::string query{"*2\r\n$5\r\nhello\r\n:-5\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, NestArray) { ++ std::string query{"*2\r\n*3\r\n$5\r\nhello\r\n:0\r\n$-1\r\n$5\r\nworld\r\n"}; ++ encoder_.encode(query, buffer_); ++ EXPECT_EQ(query, buffer_.toString()); ++ ++ // Test partial decode ++ for (char c : buffer_.toString()) { ++ Buffer::OwnedImpl temp_buffer(&c, 1); ++ decoder_.decode(temp_buffer); ++ EXPECT_EQ(0UL, temp_buffer.length()); ++ } ++ ++ EXPECT_EQ(query, decoded_values_[0]); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, NullArray) { ++ std::string query{"*-1\r\n"}; ++ buffer_.add(query); ++ decoder_.decode(buffer_); ++ EXPECT_EQ(query, decoded_values_[0]); ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, MultipleQuery) { ++ std::vector queries{ ++ "$-1\r\n", ++ "-error\r\n", ++ "+simple string\r\n", ++ "$11\r\nbulk string\r\n", ++ ":9223372036854775807\r\n", ++ ":-1\r\n", ++ ":-9223372036854775808\r\n", ++ "*0\r\n", ++ "*2\r\n$5\r\nhello\r\n:-5\r\n", ++ "*2\r\n*3\r\n$5\r\nhello\r\n:0\r\n$-1\r\n$5\r\nworld\r\n", ++ "*-1\r\n", ++ }; ++ for (auto& query : queries) { ++ buffer_.add(query); ++ } ++ decoder_.decode(buffer_); ++ EXPECT_EQ(queries.size(), decoded_values_.size()); ++ for (size_t i = 0; i < queries.size(); i++) { ++ EXPECT_EQ(queries.at(i), decoded_values_.at(i)); ++ } ++ EXPECT_EQ(0UL, buffer_.length()); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, InvalidType) { ++ buffer_.add("^"); ++ EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, InvalidInteger) { ++ buffer_.add(":-a"); ++ EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, InvalidIntegerExpectLF) { ++ buffer_.add(":-123\ra"); ++ EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, InvalidBulkStringExpectCR) { ++ buffer_.add("$1\r\nab"); ++ EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); ++} ++ ++TEST_F(RedisRawEncoderDecoderImplTest, InvalidBulkStringExpectLF) { ++ buffer_.add("$1\r\na\ra"); ++ EXPECT_THROW(decoder_.decode(buffer_), ProtocolError); ++} ++#endif + } // namespace Redis + } // namespace Common + } // namespace NetworkFilters +diff -Naur envoy/test/extensions/filters/network/common/redis/mocks.cc envoy-new/test/extensions/filters/network/common/redis/mocks.cc +--- envoy/test/extensions/filters/network/common/redis/mocks.cc 2024-05-19 11:59:43.090438778 +0800 ++++ 
envoy-new/test/extensions/filters/network/common/redis/mocks.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -29,6 +29,16 @@ + } + + MockEncoder::~MockEncoder() = default; ++#if defined(ALIMESH) ++MockRawEncoder::MockRawEncoder() { ++ ON_CALL(*this, encode(_, _)) ++ .WillByDefault(Invoke([this](absl::string_view value, Buffer::Instance& out) -> void { ++ real_encoder_.encode(value, out); ++ })); ++} ++ ++MockRawEncoder::~MockRawEncoder() = default; ++#endif + + MockDecoder::MockDecoder() = default; + MockDecoder::~MockDecoder() = default; +@@ -52,6 +62,10 @@ + + MockClientCallbacks::MockClientCallbacks() = default; + MockClientCallbacks::~MockClientCallbacks() = default; ++#if defined(ALIMESH) ++MockRawClientCallbacks::MockRawClientCallbacks() = default; ++MockRawClientCallbacks::~MockRawClientCallbacks() = default; ++#endif + + } // namespace Client + +diff -Naur envoy/test/extensions/filters/network/common/redis/mocks.h envoy-new/test/extensions/filters/network/common/redis/mocks.h +--- envoy/test/extensions/filters/network/common/redis/mocks.h 2024-05-19 11:59:43.090438778 +0800 ++++ envoy-new/test/extensions/filters/network/common/redis/mocks.h 2024-05-19 11:59:31.678438554 +0800 +@@ -6,6 +6,7 @@ + + #include "source/extensions/filters/network/common/redis/client_impl.h" + #include "source/extensions/filters/network/common/redis/codec_impl.h" ++#include "source/extensions/filters/network/common/redis/raw_client.h" + + #include "test/test_common/printers.h" + +@@ -34,7 +35,18 @@ + private: + Common::Redis::EncoderImpl real_encoder_; + }; ++#if defined(ALIMESH) ++class MockRawEncoder : public Common::Redis::RawEncoder { ++public: ++ MockRawEncoder(); ++ ~MockRawEncoder() override; ++ ++ MOCK_METHOD(void, encode, (absl::string_view value, Buffer::Instance& out)); + ++private: ++ Common::Redis::RawEncoderImpl real_encoder_; ++}; ++#endif + class MockDecoder : public Common::Redis::Decoder { + public: + MockDecoder(); +@@ -110,7 +122,18 @@ + (Common::Redis::RespValuePtr & value, const std::string& host_address, + bool ask_redirection)); + }; ++#if defined(ALIMESH) ++class MockRawClientCallbacks : public RawClientCallbacks { ++public: ++ MockRawClientCallbacks(); ++ ~MockRawClientCallbacks() override; ++ ++ void onResponse(std::string&& value) override { onResponse_(value); } + ++ MOCK_METHOD(void, onResponse_, (std::string & value)); ++ MOCK_METHOD(void, onFailure, ()); ++}; ++#endif + } // namespace Client + + } // namespace Redis +diff -Naur envoy/test/extensions/filters/network/wasm/test_data/BUILD envoy-new/test/extensions/filters/network/wasm/test_data/BUILD +--- envoy/test/extensions/filters/network/wasm/test_data/BUILD 2024-05-19 11:59:43.094438778 +0800 ++++ envoy-new/test/extensions/filters/network/wasm/test_data/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -71,7 +71,4 @@ + "test_panic_cpp.cc", + "test_resume_call_cpp.cc", + ], +- deps = [ +- "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics_lite", +- ], + ) +diff -Naur envoy/test/extensions/filters/network/wasm/test_data/close_stream_rust.rs envoy-new/test/extensions/filters/network/wasm/test_data/close_stream_rust.rs +--- envoy/test/extensions/filters/network/wasm/test_data/close_stream_rust.rs 2024-05-19 11:59:43.094438778 +0800 ++++ envoy-new/test/extensions/filters/network/wasm/test_data/close_stream_rust.rs 2024-05-19 11:59:31.678438554 +0800 +@@ -1,10 +1,9 @@ + use proxy_wasm::traits::{Context, StreamContext}; + use proxy_wasm::types::*; + +-#[no_mangle] +-pub fn _start() { ++proxy_wasm::main! 
{{
+ proxy_wasm::set_stream_context(|_, _| -> Box<dyn StreamContext> { Box::new(TestStream) });
+-}
++}}
+
+ struct TestStream;
+
+diff -Naur envoy/test/extensions/filters/network/wasm/test_data/logging_rust.rs envoy-new/test/extensions/filters/network/wasm/test_data/logging_rust.rs
+--- envoy/test/extensions/filters/network/wasm/test_data/logging_rust.rs 2024-05-19 11:59:43.094438778 +0800
++++ envoy-new/test/extensions/filters/network/wasm/test_data/logging_rust.rs 2024-05-19 11:59:31.678438554 +0800
+@@ -2,13 +2,12 @@
+ use proxy_wasm::traits::{Context, StreamContext};
+ use proxy_wasm::types::*;
+
+-#[no_mangle]
+-pub fn _start() {
++proxy_wasm::main! {{
+ proxy_wasm::set_log_level(LogLevel::Trace);
+ proxy_wasm::set_stream_context(|context_id, _| -> Box<dyn StreamContext> {
+ Box::new(TestStream { context_id })
+ });
+-}
++}}
+
+ struct TestStream {
+ context_id: u32,
+diff -Naur envoy/test/extensions/filters/network/wasm/test_data/panic_rust.rs envoy-new/test/extensions/filters/network/wasm/test_data/panic_rust.rs
+--- envoy/test/extensions/filters/network/wasm/test_data/panic_rust.rs 2024-05-19 11:59:43.094438778 +0800
++++ envoy-new/test/extensions/filters/network/wasm/test_data/panic_rust.rs 2024-05-19 11:59:31.678438554 +0800
+@@ -1,10 +1,9 @@
+ use proxy_wasm::traits::{Context, StreamContext};
+ use proxy_wasm::types::*;
+
+-#[no_mangle]
+-pub fn _start() {
++proxy_wasm::main! {{
+ proxy_wasm::set_stream_context(|_, _| -> Box<dyn StreamContext> { Box::new(TestStream) });
+-}
++}}
+
+ struct TestStream;
+
+diff -Naur envoy/test/extensions/filters/network/wasm/test_data/resume_call_rust.rs envoy-new/test/extensions/filters/network/wasm/test_data/resume_call_rust.rs
+--- envoy/test/extensions/filters/network/wasm/test_data/resume_call_rust.rs 2024-05-19 11:59:43.094438778 +0800
++++ envoy-new/test/extensions/filters/network/wasm/test_data/resume_call_rust.rs 2024-05-19 11:59:31.678438554 +0800
+@@ -3,8 +3,7 @@
+ use proxy_wasm::types::*;
+ use std::time::Duration;
+
+-#[no_mangle]
+-pub fn _start() {
++proxy_wasm::main!
{{ + proxy_wasm::set_log_level(LogLevel::Trace); + proxy_wasm::set_stream_context(|context_id, _| -> Box { + Box::new(TestStream { +@@ -13,7 +12,7 @@ + upstream_callout: None, + }) + }); +-} ++}} + + struct TestStream { + context_id: u32, +diff -Naur envoy/test/mocks/redis/BUILD envoy-new/test/mocks/redis/BUILD +--- envoy/test/mocks/redis/BUILD 2024-05-19 11:59:43.118438779 +0800 ++++ envoy-new/test/mocks/redis/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -1,8 +1,30 @@ + load( + "//bazel:envoy_build_system.bzl", ++ "envoy_cc_mock", + "envoy_package", + ) + + licenses(["notice"]) # Apache 2 + + envoy_package() ++ ++envoy_cc_mock( ++ name = "redis_mocks", ++ srcs = ["mocks.cc"], ++ hdrs = ["mocks.h"], ++ external_deps = [ ++ "abseil_strings", ++ ], ++ deps = [ ++ "//envoy/access_log:access_log_interface", ++ "//envoy/buffer:buffer_interface", ++ "//envoy/event:dispatcher_interface", ++ "//envoy/redis:async_client_interface", ++ "//envoy/http:filter_interface", ++ "//source/common/http:conn_manager_config_interface", ++ "//source/common/http:filter_manager_lib", ++ "//source/common/http:header_map_lib", ++ "//test/mocks/event:event_mocks", ++ "//test/mocks/upstream:host_mocks", ++ ], ++) +diff -Naur envoy/test/mocks/redis/mocks.cc envoy-new/test/mocks/redis/mocks.cc +--- envoy/test/mocks/redis/mocks.cc 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/test/mocks/redis/mocks.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -0,0 +1,32 @@ ++#include "mocks.h" ++ ++#include "envoy/buffer/buffer.h" ++#include "envoy/common/optref.h" ++#include "envoy/event/dispatcher.h" ++#include "envoy/http/header_map.h" ++ ++#include "gmock/gmock.h" ++#include "gtest/gtest.h" ++ ++using testing::_; ++using testing::Invoke; ++using testing::Return; ++using testing::ReturnRef; ++ ++namespace Envoy { ++namespace Redis { ++ ++MockRedisAsyncClient::MockRedisAsyncClient() { ++ ON_CALL(*this, dispatcher()).WillByDefault(ReturnRef(dispatcher_)); ++} ++MockRedisAsyncClient::~MockRedisAsyncClient() = default; ++ ++MockRedisPoolRequest::MockRedisPoolRequest(MockRedisAsyncClient* client, std::string&& request) ++ : client_(client), request_(request) {} ++MockRedisPoolRequest::~MockRedisPoolRequest() = default; ++ ++MockRedisAsyncClientCallbacks::MockRedisAsyncClientCallbacks() = default; ++MockRedisAsyncClientCallbacks::~MockRedisAsyncClientCallbacks() = default; ++ ++} // namespace Redis ++} // namespace Envoy +diff -Naur envoy/test/mocks/redis/mocks.h envoy-new/test/mocks/redis/mocks.h +--- envoy/test/mocks/redis/mocks.h 1970-01-01 08:00:00.000000000 +0800 ++++ envoy-new/test/mocks/redis/mocks.h 2024-05-19 11:59:31.678438554 +0800 +@@ -0,0 +1,77 @@ ++#pragma once ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "envoy/access_log/access_log.h" ++#include "envoy/redis/async_client.h" ++#include "envoy/http/filter.h" ++#include "envoy/matcher/matcher.h" ++ ++#include "source/common/http/utility.h" ++ ++#include "test/mocks/common.h" ++#include "test/mocks/event/mocks.h" ++#include "test/mocks/upstream/cluster_info.h" ++#include "test/mocks/upstream/host.h" ++#include "test/test_common/printers.h" ++ ++#include "absl/strings/ascii.h" ++#include "absl/strings/str_cat.h" ++#include "absl/strings/str_join.h" ++#include "gmock/gmock.h" ++ ++using testing::Return; ++ ++namespace Envoy { ++namespace Redis { ++ ++class MockRedisAsyncClient : public Redis::AsyncClient { ++public: ++ MockRedisAsyncClient(); ++ ~MockRedisAsyncClient() override; ++ ++ Redis::PoolRequest* send(std::string&& query, 
Callbacks& callbacks) override { ++ return send_(query, callbacks); ++ } ++ ++ MOCK_METHOD(void, initialize, (Redis::AsyncClientConfig config), (override)); ++ ++ MOCK_METHOD(Redis::PoolRequest*, send_, (std::string & query, Callbacks& callbacks)); ++ ++ MOCK_METHOD(Event::Dispatcher&, dispatcher, (), (override)); ++ ++ NiceMock dispatcher_; ++}; ++ ++class MockRedisPoolRequest : public Redis::PoolRequest { ++public: ++ MockRedisPoolRequest(MockRedisAsyncClient* client, std::string&& request); ++ ~MockRedisPoolRequest() override; ++ ++ MOCK_METHOD(void, cancel, ()); ++ ++ MockRedisAsyncClient* client_; ++ std::string request_; ++}; ++ ++class MockRedisAsyncClientCallbacks : public Redis::AsyncClient::Callbacks { ++public: ++ MockRedisAsyncClientCallbacks(); ++ ~MockRedisAsyncClientCallbacks() override; ++ ++ // Redis::AsyncClient::Callbacks ++ void onSuccess(std::string_view query, std::string&& response) override { ++ onSuccess_(query, response); ++ } ++ MOCK_METHOD(void, onFailure, (std::string_view query), (override)); ++ ++ MOCK_METHOD(void, onSuccess_, (std::string_view query, std::string& response)); ++}; ++ ++} // namespace Redis ++} // namespace Envoy +diff -Naur envoy/test/mocks/router/mocks.h envoy-new/test/mocks/router/mocks.h +--- envoy/test/mocks/router/mocks.h 2024-05-19 11:59:43.182438780 +0800 ++++ envoy-new/test/mocks/router/mocks.h 2024-05-19 11:59:31.678438554 +0800 +@@ -568,7 +568,7 @@ + ~MockScopedConfig() override; + #if defined(ALIMESH) + MOCK_METHOD(ConfigConstSharedPtr, getRouteConfig, +- (const Http::HeaderMap& headers, const StreamInfo::StreamInfo&), (const)); ++ (const Http::HeaderMap& headers, const StreamInfo::StreamInfo*), (const)); + #else + MOCK_METHOD(ConfigConstSharedPtr, getRouteConfig, (const Http::HeaderMap& headers), (const)); + #endif +diff -Naur envoy/test/mocks/upstream/BUILD envoy-new/test/mocks/upstream/BUILD +--- envoy/test/mocks/upstream/BUILD 2024-05-19 11:59:43.118438779 +0800 ++++ envoy-new/test/mocks/upstream/BUILD 2024-05-19 11:59:31.678438554 +0800 +@@ -216,6 +216,9 @@ + "//test/mocks/upstream:cluster_priority_set_mocks", + "//test/mocks/upstream:load_balancer_mocks", + ], ++ alimesh_deps = [ ++ "//test/mocks/redis:redis_mocks", ++ ], + ) + + envoy_cc_mock( +diff -Naur envoy/test/mocks/upstream/thread_local_cluster.cc envoy-new/test/mocks/upstream/thread_local_cluster.cc +--- envoy/test/mocks/upstream/thread_local_cluster.cc 2024-05-19 11:59:43.118438779 +0800 ++++ envoy-new/test/mocks/upstream/thread_local_cluster.cc 2024-05-19 11:59:31.678438554 +0800 +@@ -18,6 +18,9 @@ + ON_CALL(*this, tcpConnPool(_, _)) + .WillByDefault(Return(Upstream::TcpPoolData([]() {}, &tcp_conn_pool_))); + ON_CALL(*this, httpAsyncClient()).WillByDefault(ReturnRef(async_client_)); ++#if defined(ALIMESH) ++ ON_CALL(*this, redisAsyncClient()).WillByDefault(ReturnRef(redis_async_client_)); ++#endif + } + + MockThreadLocalCluster::~MockThreadLocalCluster() = default; +diff -Naur envoy/test/mocks/upstream/thread_local_cluster.h envoy-new/test/mocks/upstream/thread_local_cluster.h +--- envoy/test/mocks/upstream/thread_local_cluster.h 2024-05-19 11:59:43.118438779 +0800 ++++ envoy-new/test/mocks/upstream/thread_local_cluster.h 2024-05-19 11:59:31.678438554 +0800 +@@ -3,6 +3,7 @@ + #include "envoy/upstream/thread_local_cluster.h" + + #include "test/mocks/http/conn_pool.h" ++#include "test/mocks/redis/mocks.h" + #include "test/mocks/http/mocks.h" + #include "test/mocks/tcp/mocks.h" + +@@ -37,11 +38,17 @@ + (ResourcePriority priority, LoadBalancerContext* context)); + 
MOCK_METHOD(MockHost::MockCreateConnectionData, tcpConn_, (LoadBalancerContext * context));
+ MOCK_METHOD(Http::AsyncClient&, httpAsyncClient, ());
++#if defined(ALIMESH)
++ MOCK_METHOD(Redis::AsyncClient&, redisAsyncClient, ());
++#endif
+
+ NiceMock<MockClusterMockPrioritySet> cluster_;
+ NiceMock<MockLoadBalancer> lb_;
+ NiceMock<Http::ConnectionPool::MockInstance> conn_pool_;
+ NiceMock<Http::MockAsyncClient> async_client_;
++#if defined(ALIMESH)
++ NiceMock<Redis::MockRedisAsyncClient> redis_async_client_;
++#endif
+ NiceMock<Tcp::ConnectionPool::MockInstance> tcp_conn_pool_;
+ };
+
+diff -Naur envoy/test/test_common/wasm_base.h envoy-new/test/test_common/wasm_base.h
+--- envoy/test/test_common/wasm_base.h 2024-05-19 11:59:43.126438779 +0800
++++ envoy-new/test/test_common/wasm_base.h 2024-05-19 11:59:31.678438554 +0800
+@@ -147,6 +147,18 @@
+ context_->setEncoderFilterCallbacks(encoder_callbacks_);
+ }
+
++#if defined(ALIMESH)
++ template void doRecover() {
++ std::shared_ptr new_handle;
++ if (WasmTestBase::plugin_handle_->doRecover(new_handle)) {
++ WasmTestBase::plugin_handle_ = std::static_pointer_cast(new_handle);
++ WasmTestBase::wasm_ = WasmTestBase::plugin_handle_->wasmHandle();
++ WasmTestBase::wasm_->wasm()->lifecycleStats().recover_total_.inc();
++ setupFilterBase();
++ }
++ }
++#endif
++
+ std::unique_ptr context_;
+ NiceMock decoder_callbacks_;
+ NiceMock encoder_callbacks_;
+diff -Naur envoy/test/tools/wee8_compile/BUILD envoy-new/test/tools/wee8_compile/BUILD
+--- envoy/test/tools/wee8_compile/BUILD 2024-05-19 11:59:43.130438779 +0800
++++ envoy-new/test/tools/wee8_compile/BUILD 2024-05-19 11:59:31.678438554 +0800
+@@ -17,5 +17,10 @@
+ envoy_cc_library(
+ name = "wee8_compile_lib",
+ srcs = ["wee8_compile.cc"],
++ copts = [
++ "-Wno-comments",
++ "-Wno-non-virtual-dtor",
++ "-Wno-unused-parameter",
++ ],
+ external_deps = ["wee8"],
+ )
+diff -Naur envoy/test/tools/wee8_compile/wee8_compile.cc envoy-new/test/tools/wee8_compile/wee8_compile.cc
+--- envoy/test/tools/wee8_compile/wee8_compile.cc 2024-05-19 11:59:43.130438779 +0800
++++ envoy-new/test/tools/wee8_compile/wee8_compile.cc 2024-05-19 11:59:31.678438554 +0800
+@@ -7,6 +7,7 @@
+ #include
+ #include
+
++#include "src/flags/flags.h"
+ #include "src/wasm/c-api.h"
+ #include "v8-version.h"
+ #include "wasm-api/wasm.hh"
+@@ -149,6 +150,8 @@
+ }
+
+ wasm::vec<byte_t> serializeWasmModule(const char* path, const wasm::vec<byte_t>& content) {
++ ::v8::internal::v8_flags.liftoff = false;
++ ::v8::internal::v8_flags.wasm_max_mem_pages = 16384; /* 16,384 * 64 KiB pages == 1 GiB limit */
+ const auto engine = wasm::Engine::make();
+ if (engine == nullptr) {
+ std::cerr << "ERROR: Failed to start V8."
<< std::endl; +@@ -205,6 +208,18 @@ + + #if defined(__linux__) && defined(__x86_64__) + #define WEE8_PLATFORM "linux_x86_64" ++#elif defined(__linux__) && defined(__aarch64__) ++#define WEE8_PLATFORM "linux_aarch64" ++#elif defined(__linux__) && (defined(__ppc64le__) || defined(__PPC64LE__)) ++#define WEE8_PLATFORM "linux_ppc64le" ++#elif defined(__linux__) && defined(__s390x__) ++#define WEE8_PLATFORM "linux_s390x" ++#elif defined(__APPLE__) && defined(__x86_64__) ++#define WEE8_PLATFORM "macos_x86_64" ++#elif defined(__APPLE__) && defined(__arm64__) ++#define WEE8_PLATFORM "macos_arm64" ++#elif defined(_WIN64) && defined(_M_X64) ++#define WEE8_PLATFORM "windows_x64" + #else + #define WEE8_PLATFORM "" + #endif diff --git a/istio/1.12/patches/proxy/20240519-v8-upgrade.patch b/istio/1.12/patches/proxy/20240519-v8-upgrade.patch new file mode 100644 index 000000000..fac6aa97e --- /dev/null +++ b/istio/1.12/patches/proxy/20240519-v8-upgrade.patch @@ -0,0 +1,38 @@ +diff -Naur proxy/scripts/release-binary.sh proxy-new/scripts/release-binary.sh +--- proxy/scripts/release-binary.sh 2024-05-19 12:33:33.254478650 +0800 ++++ proxy-new/scripts/release-binary.sh 2024-05-19 12:31:11.714475870 +0800 +@@ -112,7 +112,7 @@ + # k8-opt is the output directory for x86_64 optimized builds (-c opt, so --config=release-symbol and --config=release). + # k8-dbg is the output directory for -c dbg builds. + #for config in release release-symbol debug +-for config in release ++for config in release release-symbol + do + case $config in + "release" ) +diff -Naur proxy/scripts/release-binary.sh proxy-new/scripts/release-binary.sh +--- proxy/scripts/release-binary.sh 2024-05-19 12:27:51.030471929 +0800 ++++ proxy-new/scripts/release-binary.sh 2024-05-19 12:04:55.738444918 +0800 +@@ -152,10 +152,6 @@ + echo "Building ${config} proxy" + BINARY_NAME="${HOME}/package/${BINARY_BASE_NAME}.tar.gz" + SHA256_NAME="${HOME}/${BINARY_BASE_NAME}-${SHA}.sha256" +- # All cores are used by com_googlesource_chromium_v8:build within. +- # Prebuild this target to avoid stacking this ram intensive task with others. +- # shellcheck disable=SC2086 +- bazel build ${BAZEL_BUILD_ARGS} ${CONFIG_PARAMS} @com_googlesource_chromium_v8//:build + # shellcheck disable=SC2086 + bazel build ${BAZEL_BUILD_ARGS} ${CONFIG_PARAMS} //src/envoy:envoy_tar + BAZEL_TARGET="${BAZEL_OUT}/src/envoy/envoy_tar.tar.gz" +diff -Naur proxy/tools/deb/test/build_docker.sh proxy-new/tools/deb/test/build_docker.sh +--- proxy/tools/deb/test/build_docker.sh 2024-05-19 12:27:51.030471929 +0800 ++++ proxy-new/tools/deb/test/build_docker.sh 2024-05-19 12:05:07.978445159 +0800 +@@ -20,8 +20,6 @@ + # Script requires a working docker on the test machine + # It is run in the proxy dir, will create a docker image with proxy deb installed + +- +-bazel build @com_googlesource_chromium_v8//:build + bazel build tools/deb:istio-proxy + + PROJECT="istio-testing" diff --git a/plugins/wasm-cpp/extensions/basic_auth/BUILD b/plugins/wasm-cpp/extensions/basic_auth/BUILD index d36baab02..40f87c801 100644 --- a/plugins/wasm-cpp/extensions/basic_auth/BUILD +++ b/plugins/wasm-cpp/extensions/basic_auth/BUILD @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-load("@proxy_wasm_cpp_sdk//bazel/wasm:wasm.bzl", "wasm_cc_binary") +load("@proxy_wasm_cpp_sdk//bazel:defs.bzl", "proxy_wasm_cc_binary") load("//bazel:wasm.bzl", "declare_wasm_image_targets") -wasm_cc_binary( +proxy_wasm_cc_binary( name = "basic_auth.wasm", srcs = [ "plugin.cc", @@ -28,7 +28,6 @@ wasm_cc_binary( "//common:crypto_util", "@com_google_absl//absl/strings", "@com_google_absl//absl/time", - "@proxy_wasm_cpp_sdk//:proxy_wasm_intrinsics", ], )