From 2579b039d64d854eafa4bee37d02117c5707c3cf Mon Sep 17 00:00:00 2001 From: Linus Seelinger Date: Tue, 14 Nov 2023 10:50:18 +0100 Subject: [PATCH 01/21] Initial shared memory support; mechanism for falling back to HTTP transfer still missing --- lib/umbridge.h | 157 ++++++++++++++++++++++++++++++++++++++++++++----- umbridge/um.py | 58 ++++++++++++++++++ 2 files changed, 199 insertions(+), 16 deletions(-) diff --git a/lib/umbridge.h b/lib/umbridge.h index 9552ccf5..a1e74243 100644 --- a/lib/umbridge.h +++ b/lib/umbridge.h @@ -9,7 +9,7 @@ #include #include - +#include #include "json.hpp" #include "httplib.h" @@ -86,6 +86,53 @@ namespace umbridge { } } + class SharedMemoryVector { + public: + SharedMemoryVector(std::size_t size, std::string shmem_name, bool create) + : length(size * sizeof(double)), shmem_name(shmem_name) { + int oflags = O_RDWR; + if (create) { + oflags |= O_CREAT; + } + + int fd = shm_open(shmem_name.c_str(), oflags, 0644 ); // Create shared memory + ftruncate(fd, length); // Set size of shared memory + assert (fd>0); + + ptr = (u_char *) mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); // Map shared memory to process + close(fd); + + assert (ptr); + } + + SharedMemoryVector(const std::vector& vector, std::string shmem_name) + : SharedMemoryVector(vector.size(), shmem_name, true) { + SetVector(vector); + } + + std::vector GetVector() { + std::vector vector(length / sizeof(double)); + memcpy(vector.data(), ptr, length); + return vector; + } + + void SetVector(const std::vector& vector) { + memcpy(ptr, vector.data(), length); + } + + ~SharedMemoryVector() { + munmap(ptr, length); + shm_unlink(shmem_name.c_str()); + } + + private: + u_char *ptr = nullptr; + off_t length = 0; + std::string shmem_name; + }; + + + // Client-side Model connecting to a server for the actual evaluations etc. 
class HTTPModel : public Model { public: @@ -114,6 +161,7 @@ namespace umbridge { supportsGradient = supported_features.value("Gradient", false); supportsApplyJacobian = supported_features.value("ApplyJacobian", false); supportsApplyHessian = supported_features.value("ApplyHessian", false); + supportsEvaluateShMem = supported_features.value("EvaluateShMem", false); } else { throw std::runtime_error("POST ModelInfo failed with error type '" + to_string(res.error()) + "'"); } @@ -155,24 +203,58 @@ namespace umbridge { std::vector> Evaluate(const std::vector>& inputs, json config_json = json::parse("{}")) override { - json request_body; - request_body["name"] = name; - - for (std::size_t i = 0; i < inputs.size(); i++) { - request_body["input"][i] = inputs[i]; - } - request_body["config"] = config_json; + if (supportsEvaluateShMem) { + std::cout << "Using shared memory" << std::endl; - if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) { - json response_body = parse_result_with_error_handling(res); + std::vector> shmem_inputs; + for (int i = 0; i < inputs.size(); i++) { + shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(i))); + } + std::vector> shmem_outputs; + std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) + for (int i = 0; i < output_sizes.size(); i++) { + shmem_outputs.push_back(std::make_unique(output_sizes[i], "/umbridge_out_" + std::to_string(i), true)); + } - std::vector> outputs(response_body["output"].size()); - for (std::size_t i = 0; i < response_body["output"].size(); i++) { - outputs[i] = response_body["output"][i].get>(); + json request_body; + request_body["name"] = name; + request_body["config"] = config_json; + request_body["shmem_name"] = "/umbridge"; + request_body["shmem_num_inputs"] = inputs.size(); + for (int i = 0; i < inputs.size(); i++) { + request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - return outputs; + if (auto res = cli.Post("/EvaluateShMem", headers, request_body.dump(), "application/json")) { + json response_body = parse_result_with_error_handling(res); + + std::vector> outputs(output_sizes.size()); + for (int i = 0; i < output_sizes.size(); i++) { + outputs[i] = shmem_outputs[i]->GetVector(); + } + return outputs; + } else { + throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + } + } else { - throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + json request_body; + request_body["name"] = name; + for (std::size_t i = 0; i < inputs.size(); i++) { + request_body["input"][i] = inputs[i]; + } + request_body["config"] = config_json; + + if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) { + json response_body = parse_result_with_error_handling(res); + + std::vector> outputs(response_body["output"].size()); + for (std::size_t i = 0; i < response_body["output"].size(); i++) { + outputs[i] = response_body["output"][i].get>(); + } + return outputs; + } else { + throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + } } } @@ -278,6 +360,7 @@ 
namespace umbridge { bool supportsGradient = false; bool supportsApplyJacobian = false; bool supportsApplyHessian = false; + bool supportsEvaluateShMem = false; json parse_result_with_error_handling(const httplib::Result& res) const { json response_body; @@ -405,7 +488,7 @@ namespace umbridge { void log_request(const httplib::Request& req, const httplib::Response& res) { std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; - + } // Get model from name @@ -475,6 +558,47 @@ namespace umbridge { res.set_content(response_body.dump(), "application/json"); }); + svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model& model = get_model_from_name(models, request_body["name"]); + + if (!model.SupportsEvaluate()) { + write_unsupported_feature_response(res, "Evaluate"); + return; + } + + std::vector> inputs; + for (int i = 0; i < request_body["shmem_num_inputs"].get(); i++) { + SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + std::to_string(i), false); + inputs.push_back(shmem_input.GetVector()); + } + std::vector> shmem_outputs; + for (int i = 0; i < model.GetOutputSizes().size(); i++) { + shmem_outputs.push_back(std::make_unique(model.GetOutputSizes()[i], request_body["shmem_name"].get() + "_out_" + std::to_string(i), false)); + } + + json empty_default_config; + json config_json = request_body.value("config", empty_default_config); + + if (!check_input_sizes(inputs, config_json, model, res)) + return; + + const std::lock_guard model_lock(model_mutex); + std::vector> outputs = model.Evaluate(inputs, config_json); + + if (!check_output_sizes(outputs, config_json, model, res)) + return; + + for (std::size_t i = 0; i < outputs.size(); i++) { + 
shmem_outputs[i]->SetVector(outputs[i]); + } + + json response_body; + res.set_content(response_body.dump(), "application/json"); + }); + svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) @@ -626,6 +750,7 @@ namespace umbridge { json response_body; response_body["support"] = {}; response_body["support"]["Evaluate"] = model.SupportsEvaluate(); + response_body["support"]["EvaluateShMem"] = model.SupportsEvaluate(); response_body["support"]["Gradient"] = model.SupportsGradient(); response_body["support"]["ApplyJacobian"] = model.SupportsApplyJacobian(); response_body["support"]["ApplyHessian"] = model.SupportsApplyHessian(); diff --git a/umbridge/um.py b/umbridge/um.py index 7248bf17..23b93e8b 100755 --- a/umbridge/um.py +++ b/umbridge/um.py @@ -2,6 +2,8 @@ import requests import asyncio from concurrent.futures import ThreadPoolExecutor +from multiprocessing import shared_memory +import numpy as np class Model(object): @@ -221,6 +223,61 @@ async def evaluate(request): return web.Response(text=f"{{\"output\": {output} }}") + @routes.post('/EvaluateShMem') + async def evaluate(request): + + req_json = await request.json() + model_name = req_json["name"] + model = get_model_from_name(model_name) + if model is None: + return model_not_found_response(req_json["name"]) + if not model.supports_evaluate(): + return error_response("UnsupportedFeature", "Evaluate not supported by model!", 400) + + config = {} + if "config" in req_json: + config = req_json["config"] + + parameters = [] + for i in range(req_json["shmem_num_inputs"]): + shm_c = shared_memory.SharedMemory(req_json["shmem_name"] + f"_in_{i}", False, req_json[f"shmem_size_{i}"]) + raw_shmem_parameter = np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c.close() + shm_c.unlink() + + # Check if parameter 
dimensions match model input sizes + if len(parameters) != len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Number of input parameters does not match model number of model inputs!", 400) + for i in range(len(parameters)): + if len(parameters[i]) != model.get_input_sizes(config)[i]: + return error_response("InvalidInput", f"Input parameter {i} has invalid length! Expected {model.get_input_sizes(config)[i]} but got {len(parameters[i])}.", 400) + + output_future = model_executor.submit(model.__call__, parameters, config) + output = await asyncio.wrap_future(output_future) + + # Check if output is a list of lists + if not isinstance(output, list): + return error_response("InvalidOutput", "Model output is not a list of lists!", 500) + if not all (isinstance(x, list) for x in output): + return error_response("InvalidOutput", "Model output is not a list of lists!", 500) + + # Check if output dimensions match model output sizes + if len(output) != len(model.get_output_sizes(config)): + return error_response("InvalidOutput", "Number of output vectors returned by model does not match number of model outputs declared by model!", 500) + for i in range(len(output)): + if len(output[i]) != model.get_output_sizes(config)[i]: + return error_response("InvalidOutput", f"Output vector {i} has invalid length! 
Model declared {model.get_output_sizes(config)[i]} but returned {len(output[i])}.", 500) + + # Write output to shared memory + for i in range(len(output)): + shm_c = shared_memory.SharedMemory(req_json["shmem_name"] + f"_out_{i}", create=False, size=len(output[i])*8) + raw_shmem_output = np.ndarray((len(output[i]),), dtype=np.float64, buffer=shm_c.buf) + raw_shmem_output[:] = output[i] + shm_c.close() + + return web.Response(text="{}") + @routes.post('/Gradient') async def gradient(request): @@ -402,6 +459,7 @@ async def modelinfo(request): return model_not_found_response(req_json["name"]) response_body = {"support": {}} response_body["support"]["Evaluate"] = model.supports_evaluate() + response_body["support"]["EvaluateShMem"] = model.supports_evaluate() response_body["support"]["Gradient"] = model.supports_gradient() response_body["support"]["ApplyJacobian"] = model.supports_apply_jacobian() response_body["support"]["ApplyHessian"] = model.supports_apply_hessian() From d18d8492b3cbdf104f3807c3787c3523e6ca111f Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Sun, 17 Mar 2024 22:03:06 +0100 Subject: [PATCH 02/21] Update umbridge.h Shared memory functionality is expanded and now supports input and output for: Evaluate, Gradient, Jacobian and Hessian. Umbridge automatically attempts a shared memory transmission in order to detect accessibility and falls back to full HTTP/JSON transmission on failure. Currently, the shared memory segments will only be compiled on Linux, may be expanded to Apple Mac (needs testing). 
--- lib/umbridge.h | 860 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 648 insertions(+), 212 deletions(-) diff --git a/lib/umbridge.h b/lib/umbridge.h index a1e74243..076f1ae4 100644 --- a/lib/umbridge.h +++ b/lib/umbridge.h @@ -1,150 +1,195 @@ #ifndef UMBRIDGE #define UMBRIDGE +// Only enable shared memory functionality on Linux as it supports POSIX standard (Apple Mac probably too, needs testing of shared memory and pthread_self). +// TO-DO?: Future support for Windows will require an implementation using WinAPI considering different behaviour than POSIX. +#if defined __linux__ +#define SUPPORT_POSIX_SHMEM +#endif + // #define LOGGING // Increase timeout to allow for long-running models. // This should be (to be on the safe side) significantly greater than the maximum time your model may take -#define CPPHTTPLIB_READ_TIMEOUT_SECOND 60*60 +#define CPPHTTPLIB_READ_TIMEOUT_SECOND 60 * 60 #include #include + +#ifdef SUPPORT_POSIX_SHMEM #include +#include +#endif #include "json.hpp" #include "httplib.h" using json = nlohmann::json; -namespace umbridge { +namespace umbridge +{ - class Model { + class Model + { public: Model(std::string name) : name(name) {} - virtual std::vector GetInputSizes(const json& config_json = json::parse("{}")) const = 0; - virtual std::vector GetOutputSizes(const json& config_json = json::parse("{}")) const = 0; + virtual std::vector GetInputSizes(const json &config_json = json::parse("{}")) const = 0; + virtual std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const = 0; - virtual std::vector> Evaluate(const std::vector>& inputs, - json config_json = json::parse("{}")) { - (void)inputs; (void)config_json; // Avoid unused argument warnings + virtual std::vector> Evaluate(const std::vector> &inputs, + json config_json = json::parse("{}")) + { + (void)inputs; + (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Evaluate was called, but not implemented by model!"); } virtual 
std::vector Gradient(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& sens, - json config_json = json::parse("{}")) { - (void)outWrt; (void)inWrt; (void)inputs; (void)sens; (void)config_json; // Avoid unused argument warnings + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &sens, + json config_json = json::parse("{}")) + { + (void)outWrt; + (void)inWrt; + (void)inputs; + (void)sens; + (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Gradient was called, but not implemented by model!"); } virtual std::vector ApplyJacobian(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& vec, - json config_json = json::parse("{}")) { - (void)outWrt; (void)inWrt; (void)inputs; (void)vec; (void)config_json; // Avoid unused argument warnings + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &vec, + json config_json = json::parse("{}")) + { + (void)outWrt; + (void)inWrt; + (void)inputs; + (void)vec; + (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyJacobian was called, but not implemented by model!"); } virtual std::vector ApplyHessian(unsigned int outWrt, - unsigned int inWrt1, - unsigned int inWrt2, - const std::vector>& inputs, - const std::vector& sens, - const std::vector& vec, - json config_json = json::parse("{}")) { - (void)outWrt; (void)inWrt1; (void)inWrt2; (void)inputs; (void)sens; (void)vec; (void)config_json; // Avoid unused argument warnings + unsigned int inWrt1, + unsigned int inWrt2, + const std::vector> &inputs, + const std::vector &sens, + const std::vector &vec, + json config_json = json::parse("{}")) + { + (void)outWrt; + (void)inWrt1; + (void)inWrt2; + (void)inputs; + (void)sens; + (void)vec; + (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyHessian was called, but not implemented by model!"); } - virtual bool SupportsEvaluate() 
{return false;} - virtual bool SupportsGradient() {return false;} - virtual bool SupportsApplyJacobian() {return false;} - virtual bool SupportsApplyHessian() {return false;} + virtual bool SupportsEvaluate() { return false; } + virtual bool SupportsGradient() { return false; } + virtual bool SupportsApplyJacobian() { return false; } + virtual bool SupportsApplyHessian() { return false; } - std::string GetName() const {return name;} + std::string GetName() const { return name; } protected: std::string name; }; - std::vector SupportedModels(std::string host, httplib::Headers headers = httplib::Headers()) { + std::vector SupportedModels(std::string host, httplib::Headers headers = httplib::Headers()) + { httplib::Client cli(host.c_str()); - if (auto res = cli.Get("/Info", headers)) { + if (auto res = cli.Get("/Info", headers)) + { json response = json::parse(res->body); - if (response.value("protocolVersion",0) != 1.0) + if (response.value("protocolVersion", 0) != 1.0) throw std::runtime_error("Model protocol version not supported!"); return response["models"]; - - } else { + } + else + { throw std::runtime_error("GET Info failed with error type '" + to_string(res.error()) + "'"); } } - - class SharedMemoryVector { +#ifdef SUPPORT_POSIX_SHMEM + class SharedMemoryVector + { public: SharedMemoryVector(std::size_t size, std::string shmem_name, bool create) - : length(size * sizeof(double)), shmem_name(shmem_name) { + : length(size * sizeof(double)), shmem_name(shmem_name) + { int oflags = O_RDWR; - if (create) { + if (create) + { + created = true; oflags |= O_CREAT; } - int fd = shm_open(shmem_name.c_str(), oflags, 0644 ); // Create shared memory - ftruncate(fd, length); // Set size of shared memory - assert (fd>0); + int fd = shm_open(shmem_name.c_str(), oflags, 0644); // Create shared memory + ftruncate(fd, length); // Set size of shared memory + assert(fd > 0); - ptr = (u_char *) mmap(NULL, length, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); // Map shared memory to 
process + ptr = (u_char *)mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); // Map shared memory to process close(fd); - assert (ptr); + assert(ptr); } - SharedMemoryVector(const std::vector& vector, std::string shmem_name) - : SharedMemoryVector(vector.size(), shmem_name, true) { + SharedMemoryVector(const std::vector &vector, std::string shmem_name) + : SharedMemoryVector(vector.size(), shmem_name, true) + { SetVector(vector); } - std::vector GetVector() { + std::vector GetVector() + { std::vector vector(length / sizeof(double)); memcpy(vector.data(), ptr, length); return vector; } - void SetVector(const std::vector& vector) { + void SetVector(const std::vector &vector) + { memcpy(ptr, vector.data(), length); } - ~SharedMemoryVector() { + ~SharedMemoryVector() + { munmap(ptr, length); - shm_unlink(shmem_name.c_str()); + if (created) + shm_unlink(shmem_name.c_str()); } private: + bool created = false; u_char *ptr = nullptr; off_t length = 0; std::string shmem_name; }; - - +#endif // Client-side Model connecting to a server for the actual evaluations etc. - class HTTPModel : public Model { + class HTTPModel : public Model + { public: - HTTPModel(std::string host, std::string name, httplib::Headers headers = httplib::Headers()) - : Model(name), cli(host.c_str()), headers(headers) + : Model(name), cli(host.c_str()), headers(headers) { // Check if requested model is available on server std::vector models = SupportedModels(host, headers); - if (std::find(models.begin(), models.end(), name) == models.end()) { + if (std::find(models.begin(), models.end(), name) == models.end()) + { std::string model_names = ""; - for (auto& m : models) { + for (auto &m : models) + { model_names += "'" + m + "' "; } throw std::runtime_error("Model " + name + " not found on server! 
Available models: " + model_names + "."); @@ -153,7 +198,8 @@ namespace umbridge { json request_body; request_body["name"] = name; - if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) { + if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) + { json response = json::parse(res->body); json supported_features = response.at("support"); @@ -161,198 +207,402 @@ namespace umbridge { supportsGradient = supported_features.value("Gradient", false); supportsApplyJacobian = supported_features.value("ApplyJacobian", false); supportsApplyHessian = supported_features.value("ApplyHessian", false); +#ifdef SUPPORT_POSIX_SHMEM supportsEvaluateShMem = supported_features.value("EvaluateShMem", false); - } else { + supportsGradientShMem = supported_features.value("GradientShMem", false); + supportsApplyJacobianShMem = supported_features.value("ApplyJacobianShMem", false); + supportsApplyHessianShMem = supported_features.value("ApplyHessianShMem", false); +#endif + } + else + { throw std::runtime_error("POST ModelInfo failed with error type '" + to_string(res.error()) + "'"); } +#ifdef SUPPORT_POSIX_SHMEM + // Test whether client and server are able to communicate through shared memory. Disables ShMem if test fails. 
+ unsigned long int tid = pthread_self(); + request_body["tid"] = std::to_string(tid); + std::vector testvec = {12345.0}; + SharedMemoryVector shmem_input(testvec, "/umbridge_test_shmem_in_" + std::to_string(tid)); + SharedMemoryVector shmem_output(1, "/umbridge_test_shmem_out_" + std::to_string(tid), true); + auto res = cli.Post("/TestShMem", headers, request_body.dump(), "application/json"); + + if (shmem_output.GetVector()[0] != testvec[0]) + { + std::cout << shmem_output.GetVector()[0] << std::endl; + supportsEvaluateShMem = false; + supportsApplyJacobianShMem = false; + supportsApplyHessianShMem = false; + supportsGradientShMem = false; + std::cout << "Server not accessible via shared memory" << std::endl; + } + else + { + std::cout << "Server accessible via shared memory" << std::endl; + } +#endif } - std::vector GetInputSizes(const json& config_json = json::parse("{}")) const override { + std::vector GetInputSizes(const json &config_json = json::parse("{}")) const override + { json request_body; request_body["name"] = name; if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) { + if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) + { json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["inputSizes"].get>(); return outputvec; - } else { + } + else + { throw std::runtime_error("POST InputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector GetOutputSizes(const json& config_json = json::parse("{}")) const override { + std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const override + { json request_body; request_body["name"] = name; if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) { + if (auto res 
= cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) + { json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["outputSizes"].get>(); return outputvec; - } else { + } + else + { throw std::runtime_error("POST OutputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector> Evaluate(const std::vector>& inputs, json config_json = json::parse("{}")) override { - - if (supportsEvaluateShMem) { - std::cout << "Using shared memory" << std::endl; - + std::vector> Evaluate(const std::vector> &inputs, json config_json = json::parse("{}")) override + { +#ifdef SUPPORT_POSIX_SHMEM + if (supportsEvaluateShMem) + { + unsigned int tid = pthread_self(); std::vector> shmem_inputs; - for (int i = 0; i < inputs.size(); i++) { - shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(i))); + for (int i = 0; i < inputs.size(); i++) + { + shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); } std::vector> shmem_outputs; std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) - for (int i = 0; i < output_sizes.size(); i++) { - shmem_outputs.push_back(std::make_unique(output_sizes[i], "/umbridge_out_" + std::to_string(i), true)); + for (int i = 0; i < output_sizes.size(); i++) + { + shmem_outputs.push_back(std::make_unique(output_sizes[i], "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(i), true)); } json request_body; + request_body["tid"] = std::to_string(tid); request_body["name"] = name; request_body["config"] = config_json; request_body["shmem_name"] = "/umbridge"; request_body["shmem_num_inputs"] = inputs.size(); - for (int i = 0; i < inputs.size(); i++) { + for (int i = 0; i < inputs.size(); i++) + { request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - if (auto res = cli.Post("/EvaluateShMem", headers, request_body.dump(), "application/json")) { + if (auto res = cli.Post("/EvaluateShMem", headers, request_body.dump(), "application/json")) + { json response_body = parse_result_with_error_handling(res); std::vector> outputs(output_sizes.size()); - for (int i = 0; i < output_sizes.size(); i++) { + for (int i = 0; i < output_sizes.size(); i++) + { outputs[i] = shmem_outputs[i]->GetVector(); } return outputs; - } else { + } + else + { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } - - } else { + } + else + { +#endif json request_body; request_body["name"] = name; - for (std::size_t i = 0; i < inputs.size(); i++) { + for (std::size_t i = 0; i < inputs.size(); i++) + { request_body["input"][i] = inputs[i]; } request_body["config"] = config_json; - if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) { + if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) + { json response_body = parse_result_with_error_handling(res); std::vector> outputs(response_body["output"].size()); - for 
(std::size_t i = 0; i < response_body["output"].size(); i++) { + for (std::size_t i = 0; i < response_body["output"].size(); i++) + { outputs[i] = response_body["output"][i].get>(); } return outputs; - } else { + } + else + { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } +#ifdef SUPPORT_POSIX_SHMEM } +#endif } std::vector Gradient(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& sens, - json config_json = json::parse("{}")) override + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &sens, + json config_json = json::parse("{}")) override { +#ifdef SUPPORT_POSIX_SHMEM + if (supportsGradientShMem) + { + unsigned int tid = pthread_self(); + std::vector> shmem_inputs; + for (int i = 0; i < inputs.size(); i++) + { + shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); + } + SharedMemoryVector shmem_output(inputs[inWrt].size(), "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(0), true); - json request_body; - request_body["name"] = name; - request_body["outWrt"] = outWrt; - request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) { - request_body["input"][i] = inputs[i]; + json request_body; + request_body["tid"] = std::to_string(tid); + request_body["name"] = name; + request_body["config"] = config_json; + request_body["outWrt"] = outWrt; + request_body["inWrt"] = inWrt; + request_body["shmem_name"] = "/umbridge"; + request_body["sens"] = sens; + request_body["shmem_num_inputs"] = inputs.size(); + for (int i = 0; i < inputs.size(); i++) + { + request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); + } + if (auto res = cli.Post("/GradientShMem", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); + + std::vector output(inputs[inWrt].size()); + output = 
shmem_output.GetVector(); + return output; + } + else + { + throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + } } - request_body["sens"] = sens; - request_body["config"] = config_json; + else + { +#endif + json request_body; + request_body["name"] = name; + request_body["outWrt"] = outWrt; + request_body["inWrt"] = inWrt; + for (std::size_t i = 0; i < inputs.size(); i++) + { + request_body["input"][i] = inputs[i]; + } + request_body["sens"] = sens; + request_body["config"] = config_json; - if (auto res = cli.Post("/Gradient", headers, request_body.dump(), "application/json")) { - json response_body = parse_result_with_error_handling(res); + if (auto res = cli.Post("/Gradient", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); - return response_body["output"].get>(); - } else { - throw std::runtime_error("POST Gradient failed with error type '" + to_string(res.error()) + "'"); + return response_body["output"].get>(); + } + else + { + throw std::runtime_error("POST Gradient failed with error type '" + to_string(res.error()) + "'"); + } +#ifdef SUPPORT_POSIX_SHMEM } +#endif } std::vector ApplyJacobian(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& vec, - json config_json = json::parse("{}")) override { + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &vec, + json config_json = json::parse("{}")) override + { +#ifdef SUPPORT_POSIX_SHMEM + if (supportsApplyJacobianShMem) + { + unsigned int tid = pthread_self(); + std::vector> shmem_inputs; + for (int i = 0; i < inputs.size(); i++) + { + shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); + } + std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) + SharedMemoryVector shmem_output(output_sizes[outWrt], "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(0), true); - json request_body; - request_body["name"] = name; - request_body["outWrt"] = outWrt; - request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) { - request_body["input"][i] = inputs[i]; + json request_body; + request_body["tid"] = std::to_string(tid); + request_body["name"] = name; + request_body["config"] = config_json; + request_body["outWrt"] = outWrt; + request_body["inWrt"] = inWrt; + request_body["vec"] = vec; + request_body["shmem_name"] = "/umbridge"; + request_body["shmem_num_inputs"] = inputs.size(); + for (int i = 0; i < inputs.size(); i++) + { + request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); + } + if (auto res = cli.Post("/ApplyJacobianShMem", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); + + std::vector output(output_sizes[outWrt]); + output = shmem_output.GetVector(); + return output; + } + else + { + throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + } } - request_body["vec"] = vec; - request_body["config"] = config_json; + else + { +#endif + json request_body; + request_body["name"] = name; + request_body["outWrt"] = outWrt; + request_body["inWrt"] = inWrt; + for (std::size_t i = 0; i < inputs.size(); i++) + { + request_body["input"][i] = inputs[i]; + } + request_body["vec"] = vec; + request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) { - json response_body = parse_result_with_error_handling(res); + if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); - return 
response_body["output"].get>(); - } else { - throw std::runtime_error("POST ApplyJacobian failed with error type '" + to_string(res.error()) + "'"); + return response_body["output"].get>(); + } + else + { + throw std::runtime_error("POST ApplyJacobian failed with error type '" + to_string(res.error()) + "'"); + } +#ifdef SUPPORT_POSIX_SHMEM } +#endif } std::vector ApplyHessian(unsigned int outWrt, - unsigned int inWrt1, - unsigned int inWrt2, - const std::vector>& inputs, - const std::vector& sens, - const std::vector& vec, - json config_json = json::parse("{}")) override { + unsigned int inWrt1, + unsigned int inWrt2, + const std::vector> &inputs, + const std::vector &sens, + const std::vector &vec, + json config_json = json::parse("{}")) override + { +#ifdef SUPPORT_POSIX_SHMEM + if (supportsApplyHessianShMem) + { + unsigned int tid = pthread_self(); + std::vector> shmem_inputs; + for (int i = 0; i < inputs.size(); i++) + { + shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); + } + std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) - json request_body; - request_body["name"] = name; - request_body["outWrt"] = outWrt; - request_body["inWrt1"] = inWrt1; - request_body["inWrt2"] = inWrt2; - for (std::size_t i = 0; i < inputs.size(); i++) { - request_body["input"][i] = inputs[i]; + SharedMemoryVector shmem_output(output_sizes[outWrt], "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(0), true); + + json request_body; + request_body["tid"] = std::to_string(tid); + request_body["name"] = name; + request_body["config"] = config_json; + request_body["outWrt"] = outWrt; + request_body["inWrt1"] = inWrt1; + request_body["inWrt2"] = inWrt2; + request_body["shmem_name"] = "/umbridge"; + request_body["sens"] = sens; + request_body["vec"] = vec; + request_body["shmem_num_inputs"] = inputs.size(); + for (int i = 0; i < inputs.size(); i++) + { + request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); + } + if (auto res = cli.Post("/ApplyHessianShMem", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); + + std::vector output(output_sizes[outWrt]); + output = shmem_output.GetVector(); + return output; + } + else + { + throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + } } - request_body["sens"] = sens; - request_body["vec"] = vec; - request_body["config"] = config_json; + else + { +#endif + json request_body; + request_body["name"] = name; + request_body["outWrt"] = outWrt; + request_body["inWrt1"] = inWrt1; + request_body["inWrt2"] = inWrt2; + for (std::size_t i = 0; i < inputs.size(); i++) + { + request_body["input"][i] = inputs[i]; + } + request_body["sens"] = sens; + request_body["vec"] = vec; + request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) { - json response_body = 
parse_result_with_error_handling(res); + if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) + { + json response_body = parse_result_with_error_handling(res); - return response_body["output"].get>(); - } else { - throw std::runtime_error("POST ApplyHessian failed with error type '" + to_string(res.error()) + "'"); + return response_body["output"].get>(); + } + else + { + throw std::runtime_error("POST ApplyHessian failed with error type '" + to_string(res.error()) + "'"); + } +#ifdef SUPPORT_POSIX_SHMEM } +#endif } - bool SupportsEvaluate() override { + bool SupportsEvaluate() override + { return supportsEvaluate; } - bool SupportsGradient() override { + bool SupportsGradient() override + { return supportsGradient; } - bool SupportsApplyJacobian() override { + bool SupportsApplyJacobian() override + { return supportsApplyJacobian; } - bool SupportsApplyHessian() override { + bool SupportsApplyHessian() override + { return supportsApplyHessian; } private: - mutable httplib::Client cli; httplib::Headers headers; @@ -361,25 +611,34 @@ namespace umbridge { bool supportsApplyJacobian = false; bool supportsApplyHessian = false; bool supportsEvaluateShMem = false; + bool supportsGradientShMem = false; + bool supportsApplyJacobianShMem = false; + bool supportsApplyHessianShMem = false; - json parse_result_with_error_handling(const httplib::Result& res) const { + json parse_result_with_error_handling(const httplib::Result &res) const + { json response_body; - try { + try + { response_body = json::parse(res->body); - } catch (json::parse_error& e) { + } + catch (json::parse_error &e) + { throw std::runtime_error("Response JSON could not be parsed. 
Response body: '" + res->body + "'"); } - if (response_body.find("error") != response_body.end()) { + if (response_body.find("error") != response_body.end()) + { throw std::runtime_error("Model server returned error of type " + response_body["error"]["type"].get() + ", message: " + response_body["error"]["message"].get()); } return response_body; } - }; // Check if inputs dimensions match model's expected input size and return error in httplib response - bool check_input_sizes(const std::vector>& inputs, const json& config_json, const Model& model, httplib::Response& res) { - if (inputs.size() != model.GetInputSizes(config_json).size()) { + bool check_input_sizes(const std::vector> &inputs, const json &config_json, const Model &model, httplib::Response &res) + { + if (inputs.size() != model.GetInputSizes(config_json).size()) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Number of inputs does not match number of model inputs. Expected " + std::to_string(model.GetInputSizes(config_json).size()) + " but got " + std::to_string(inputs.size()); @@ -387,8 +646,10 @@ namespace umbridge { res.status = 400; return false; } - for (std::size_t i = 0; i < inputs.size(); i++) { - if (inputs[i].size() != model.GetInputSizes(config_json)[i]) { + for (std::size_t i = 0; i < inputs.size(); i++) + { + if (inputs[i].size() != model.GetInputSizes(config_json)[i]) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input size mismatch! 
In input " + std::to_string(i) + " model expected size " + std::to_string(model.GetInputSizes(config_json)[i]) + " but got " + std::to_string(inputs[i].size()); @@ -401,8 +662,10 @@ namespace umbridge { } // Check if sensitivity vector's dimension matches correct model output size and return error in httplib response - bool check_sensitivity_size(const std::vector& sens, int outWrt, const json& config_json, const Model& model, httplib::Response& res) { - if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) { + bool check_sensitivity_size(const std::vector &sens, int outWrt, const json &config_json, const Model &model, httplib::Response &res) + { + if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Sensitivity vector size mismatch! Expected " + std::to_string(model.GetOutputSizes(config_json)[outWrt]) + " but got " + std::to_string(sens.size()); @@ -414,8 +677,10 @@ namespace umbridge { } // Check if vector's dimension matches correct model output size and return error in httplib response - bool check_vector_size(const std::vector& vec, int inWrt, const json& config_json, const Model& model, httplib::Response& res) { - if (vec.size() != model.GetInputSizes(config_json)[inWrt]) { + bool check_vector_size(const std::vector &vec, int inWrt, const json &config_json, const Model &model, httplib::Response &res) + { + if (vec.size() != model.GetInputSizes(config_json)[inWrt]) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Vector size mismatch! 
Expected " + std::to_string(model.GetInputSizes(config_json)[inWrt]) + " but got " + std::to_string(vec.size()); @@ -427,8 +692,10 @@ namespace umbridge { } // Check if outputs dimensions match model's expected output size and return error in httplib response - bool check_output_sizes(const std::vector>& outputs, const json& config_json, const Model& model, httplib::Response& res) { - if (outputs.size() != model.GetOutputSizes(config_json).size()) { + bool check_output_sizes(const std::vector> &outputs, const json &config_json, const Model &model, httplib::Response &res) + { + if (outputs.size() != model.GetOutputSizes(config_json).size()) + { json response_body; response_body["error"]["type"] = "InvalidOutput"; response_body["error"]["message"] = "Number of outputs declared by model does not match number of outputs returned by model. Model declared " + std::to_string(model.GetOutputSizes(config_json).size()) + " but returned " + std::to_string(outputs.size()); @@ -436,8 +703,10 @@ namespace umbridge { res.status = 500; return false; } - for (std::size_t i = 0; i < outputs.size(); i++) { - if (outputs[i].size() != model.GetOutputSizes(config_json)[i]) { + for (std::size_t i = 0; i < outputs.size(); i++) + { + if (outputs[i].size() != model.GetOutputSizes(config_json)[i]) + { json response_body; response_body["error"]["type"] = "InvalidOutput"; response_body["error"]["message"] = "Output size mismatch! 
In output " + std::to_string(i) + " model declared size " + std::to_string(model.GetOutputSizes(config_json)[i]) + " but returned " + std::to_string(outputs[i].size()); @@ -450,8 +719,10 @@ namespace umbridge { } // Check if inWrt is between zero and model's input size inWrt and return error in httplib response - bool check_input_wrt(int inWrt, const json& config_json, const Model& model, httplib::Response& res) { - if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) { + bool check_input_wrt(int inWrt, const json &config_json, const Model &model, httplib::Response &res) + { + if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input inWrt out of range! Expected between 0 and " + std::to_string(model.GetInputSizes(config_json).size() - 1) + " but got " + std::to_string(inWrt); @@ -463,8 +734,10 @@ namespace umbridge { } // Check if outWrt is between zero and model's output size outWrt and return error in httplib response - bool check_output_wrt(int outWrt, const json& config_json, const Model& model, httplib::Response& res) { - if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) { + bool check_output_wrt(int outWrt, const json &config_json, const Model &model, httplib::Response &res) + { + if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) + { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input outWrt out of range! 
Expected between 0 and " + std::to_string(model.GetOutputSizes(config_json).size() - 1) + " but got " + std::to_string(outWrt); @@ -476,7 +749,8 @@ namespace umbridge { } // Construct response for unsupported feature - void write_unsupported_feature_response(httplib::Response& res, std::string feature) { + void write_unsupported_feature_response(httplib::Response &res, std::string feature) + { json response_body; response_body["error"]["type"] = "UnsupportedFeature"; response_body["error"]["message"] = "Feature '" + feature + "' is not supported by this model"; @@ -486,15 +760,18 @@ namespace umbridge { // log request - void log_request(const httplib::Request& req, const httplib::Response& res) { - std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; - + void log_request(const httplib::Request &req, const httplib::Response &res) + { + std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; } // Get model from name - Model& get_model_from_name(std::vector& models, std::string name) { - for (auto& model : models) { - if (model->GetName() == name) { + Model &get_model_from_name(std::vector &models, std::string name) + { + for (auto &model : models) + { + if (model->GetName() == name) + { return *model; } } @@ -502,10 +779,14 @@ namespace umbridge { } // Check if model exists and return error in httplib response - bool check_model_exists(std::vector& models, std::string name, httplib::Response& res) { - try { + bool check_model_exists(std::vector &models, std::string name, httplib::Response &res) + { + try + { get_model_from_name(models, name); - } catch (std::runtime_error& e) { + } + catch (std::runtime_error &e) + { json response_body; response_body["error"]["type"] = "ModelNotFound"; response_body["error"]["message"] = "Model '" + name + "' not supported by this server!"; @@ -517,12 
+798,14 @@ namespace umbridge { } // Provides access to a model via network - void serveModels(std::vector models, std::string host, int port) { + void serveModels(std::vector models, std::string host, int port) + { httplib::Server svr; std::mutex model_mutex; // Ensure the underlying model is only called sequentially - svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) { + svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -555,10 +838,10 @@ namespace umbridge { response_body["output"][i] = outputs[i]; } - res.set_content(response_body.dump(), "application/json"); - }); - - svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) { + res.set_content(response_body.dump(), "application/json"); }); +#ifdef SUPPORT_POSIX_SHMEM + svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -571,12 +854,12 @@ namespace umbridge { std::vector> inputs; for (int i = 0; i < request_body["shmem_num_inputs"].get(); i++) { - SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + std::to_string(i), false); + SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + request_body["tid"].get() + "_" + std::to_string(i), false); inputs.push_back(shmem_input.GetVector()); } std::vector> shmem_outputs; for (int i = 0; i < model.GetOutputSizes().size(); i++) { - shmem_outputs.push_back(std::make_unique(model.GetOutputSizes()[i], request_body["shmem_name"].get() + "_out_" + std::to_string(i), false)); + shmem_outputs.push_back(std::make_unique(model.GetOutputSizes()[i], request_body["shmem_name"].get() + 
"_out_" + request_body["tid"].get() + "_" + std::to_string(i), false)); } json empty_default_config; @@ -596,10 +879,10 @@ namespace umbridge { } json response_body; - res.set_content(response_body.dump(), "application/json"); - }); - - svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) { + res.set_content(response_body.dump(), "application/json"); }); +#endif + svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -638,10 +921,56 @@ namespace umbridge { json response_body; response_body["output"] = gradient; - res.set_content(response_body.dump(), "application/json"); - }); + res.set_content(response_body.dump(), "application/json"); }); +#ifdef SUPPORT_POSIX_SHMEM + svr.Post("/GradientShMem", [&](const httplib::Request &req, httplib::Response &res) + { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model& model = get_model_from_name(models, request_body["name"]); + + if (!model.SupportsGradient()) { + write_unsupported_feature_response(res, "Gradient"); + return; + } + + unsigned int inWrt = request_body.at("inWrt"); + unsigned int outWrt = request_body.at("outWrt"); - svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) { + std::vector> inputs; + for (int i = 0; i < request_body["shmem_num_inputs"].get(); i++) { + SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + request_body["tid"].get() + "_" + std::to_string(i), false); + inputs.push_back(shmem_input.GetVector()); + } + SharedMemoryVector shmem_output(inputs[inWrt].size(), request_body["shmem_name"].get() + "_out_" + request_body["tid"].get() + "_" + std::to_string(0), false); + + std::vector sens = request_body.at("sens"); + + json empty_default_config; + json 
config_json = request_body.value("config", empty_default_config); + + if (!check_input_wrt(inWrt, config_json, model, res)) + return; + if (!check_output_wrt(outWrt, config_json, model, res)) + return; + if (!check_input_sizes(inputs, config_json, model, res)) + return; + if (!check_sensitivity_size(sens, outWrt, config_json, model, res)) + return; + + const std::lock_guard model_lock(model_mutex); + std::vector gradient = model.Gradient(outWrt, inWrt, inputs, sens, config_json); + + shmem_output.SetVector(gradient); + json response_body; + + + res.set_content(response_body.dump(), "application/json"); }); +#endif + + svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -680,10 +1009,54 @@ namespace umbridge { json response_body; response_body["output"] = jacobian_action; - res.set_content(response_body.dump(), "application/json"); - }); + res.set_content(response_body.dump(), "application/json"); }); +#ifdef SUPPORT_POSIX_SHMEM + svr.Post("/ApplyJacobianShMem", [&](const httplib::Request &req, httplib::Response &res) + { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model& model = get_model_from_name(models, request_body["name"]); + + if (!model.SupportsApplyJacobian()) { + write_unsupported_feature_response(res, "ApplyJacobian"); + return; + } + + unsigned int inWrt = request_body.at("inWrt"); + unsigned int outWrt = request_body.at("outWrt"); + + std::vector> inputs; + for (int i = 0; i < request_body["shmem_num_inputs"].get(); i++) { + SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + request_body["tid"].get() + "_" + std::to_string(i), false); + inputs.push_back(shmem_input.GetVector()); + } + SharedMemoryVector shmem_output(model.GetOutputSizes()[outWrt], 
request_body["shmem_name"].get() + "_out_" + request_body["tid"].get() + "_" + std::to_string(0), false); + + std::vector vec = request_body.at("vec"); + + json empty_default_config; + json config_json = request_body.value("config", empty_default_config); + + if (!check_input_wrt(inWrt, config_json, model, res)) + return; + if (!check_output_wrt(outWrt, config_json, model, res)) + return; + if (!check_input_sizes(inputs, config_json, model, res)) + return; + if (!check_vector_size(vec, inWrt, config_json, model, res)) + return; + + const std::lock_guard model_lock(model_mutex); + std::vector jacobian_action = model.ApplyJacobian(outWrt, inWrt, inputs, vec, config_json); + + json response_body; + shmem_output.SetVector(jacobian_action); - svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) { + res.set_content(response_body.dump(), "application/json"); }); +#endif + svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -726,10 +1099,58 @@ namespace umbridge { json response_body; response_body["output"] = hessian_action; - res.set_content(response_body.dump(), "application/json"); - }); + res.set_content(response_body.dump(), "application/json"); }); +#ifdef SUPPORT_POSIX_SHMEM + svr.Post("/ApplyHessianShMem", [&](const httplib::Request &req, httplib::Response &res) + { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model& model = get_model_from_name(models, request_body["name"]); + + if (!model.SupportsApplyHessian()) { + write_unsupported_feature_response(res, "ApplyHessian"); + return; + } + + unsigned int outWrt = request_body.at("outWrt"); + unsigned int inWrt1 = request_body.at("inWrt1"); + unsigned int inWrt2 = request_body.at("inWrt2"); + + std::vector> inputs; + for (int i = 0; i < 
request_body["shmem_num_inputs"].get(); i++) { + SharedMemoryVector shmem_input(request_body["shmem_size_" + std::to_string(i)].get(), request_body["shmem_name"].get() + "_in_" + request_body["tid"].get() + "_" + std::to_string(i), false); + inputs.push_back(shmem_input.GetVector()); + } + SharedMemoryVector shmem_output(model.GetOutputSizes()[outWrt], request_body["shmem_name"].get() + "_out_" + request_body["tid"].get() + "_" + std::to_string(0), false); + + std::vector sens = request_body.at("sens"); + std::vector vec = request_body.at("vec"); + + json empty_default_config; + json config_json = request_body.value("config", empty_default_config); + + if (!check_input_wrt(inWrt1, config_json, model, res)) + return; + if (!check_input_wrt(inWrt2, config_json, model, res)) + return; + if (!check_output_wrt(outWrt, config_json, model, res)) + return; + if (!check_input_sizes(inputs, config_json, model, res)) + return; + if (!check_sensitivity_size(sens, outWrt, config_json, model, res)) + return; + + const std::lock_guard model_lock(model_mutex); + std::vector hessian_action = model.ApplyHessian(outWrt, inWrt1, inWrt2, inputs, sens, vec, config_json); - svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) { + json response_body; + shmem_output.SetVector(hessian_action); + + res.set_content(response_body.dump(), "application/json"); }); +#endif + svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) + { json response_body; response_body["protocolVersion"] = 1.0; std::vector model_names; @@ -738,10 +1159,10 @@ namespace umbridge { } response_body["models"] = model_names; - res.set_content(response_body.dump(), "application/json"); - }); + res.set_content(response_body.dump(), "application/json"); }); - svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) { + svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if 
(!check_model_exists(models, request_body["name"], res)) return; @@ -752,13 +1173,15 @@ namespace umbridge { response_body["support"]["Evaluate"] = model.SupportsEvaluate(); response_body["support"]["EvaluateShMem"] = model.SupportsEvaluate(); response_body["support"]["Gradient"] = model.SupportsGradient(); + response_body["support"]["GradientShMem"] = model.SupportsGradient(); response_body["support"]["ApplyJacobian"] = model.SupportsApplyJacobian(); + response_body["support"]["ApplyJacobianShMem"] = model.SupportsApplyJacobian(); response_body["support"]["ApplyHessian"] = model.SupportsApplyHessian(); + response_body["support"]["ApplyHessianShMem"] = model.SupportsApplyHessian(); + res.set_content(response_body.dump(), "application/json"); }); - res.set_content(response_body.dump(), "application/json"); - }); - - svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res) { + svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -770,10 +1193,10 @@ namespace umbridge { json response_body; response_body["inputSizes"] = model.GetInputSizes(config_json); - res.set_content(response_body.dump(), "application/json"); - }); + res.set_content(response_body.dump(), "application/json"); }); - svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) { + svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) + { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -785,13 +1208,27 @@ namespace umbridge { json response_body; response_body["outputSizes"] = model.GetOutputSizes(config_json); - res.set_content(response_body.dump(), "application/json"); - }); - + res.set_content(response_body.dump(), "application/json"); }); +#ifdef SUPPORT_POSIX_SHMEM + svr.Post("/TestShMem", [&](const httplib::Request 
&req, httplib::Response &res) + { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model &model = get_model_from_name(models, request_body["name"]); + SharedMemoryVector shmem_input(1, "/umbridge_test_shmem_in_" + request_body["tid"].get(), false); + SharedMemoryVector shmem_output(1, "/umbridge_test_shmem_out_" + request_body["tid"].get(), false); + std::vector value = shmem_input.GetVector(); + shmem_output.SetVector(value); + json response_body; + response_body["value"] = value; + res.set_content(response_body.dump(), "application/json"); }); +#endif std::cout << "Listening on port " << port << "..." << std::endl; #ifdef LOGGING - svr.set_logger([](const httplib::Request& req, const httplib::Response& res) { + svr.set_logger([](const httplib::Request &req, const httplib::Response &res) + { if (res.status >= 500) { std::cerr << "[ERROR] "; } else if (res.status >= 400) { @@ -799,8 +1236,7 @@ namespace umbridge { } else { std::cout << "[INFO] "; } - log_request(req, res); - }); + log_request(req, res); }); #endif svr.listen(host.c_str(), port); std::cout << "Quit" << std::endl; From 76d88264cc239b0217cdd41f13207542f09f86a3 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Sun, 17 Mar 2024 22:14:03 +0100 Subject: [PATCH 03/21] Update um.py Shared memory functionality is expanded and now supports input and output for: Evaluate, Gradient, Jacobian and Hessian. Umbridge automatically attempts a shared memory transmission in order to detect accessibility and falls back to full HTTP/JSON transmission on failure. Works on Linux and Windows. MacOS is to be tested. 
--- umbridge/um.py | 525 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 463 insertions(+), 62 deletions(-) diff --git a/umbridge/um.py b/umbridge/um.py index 23b93e8b..ee5e78d1 100755 --- a/umbridge/um.py +++ b/umbridge/um.py @@ -4,6 +4,7 @@ from concurrent.futures import ThreadPoolExecutor from multiprocessing import shared_memory import numpy as np +import threading class Model(object): @@ -53,6 +54,37 @@ def __init__(self, url, name): self.__supports_gradient = response["support"].get("Gradient", False) self.__supports_apply_jacobian = response["support"].get("ApplyJacobian", False) self.__supports_apply_hessian = response["support"].get("ApplyHessian", False) + self.__supports_evaluate_shmem = response["support"].get("EvaluateShMem", False) + self.__supports_gradient_shmem = response["support"].get("GradientShMem", False) + self.__supports_apply_jacobian_shmem = response["support"].get("ApplyJacobianShMem", False) + self.__supports_apply_hessian_shmem = response["support"].get("ApplyHessianShMem", False) + + #Test whether client and server are able to communicate through shared memory. Disables ShMem if test fails. 
+ testvec = [12345.0] + tid = threading.get_native_id() + input["tid"] = str(tid) + shm_c_in = shared_memory.SharedMemory("/umbridge_test_shmem_in_" + str(tid), True, 8) + raw_shmem_input = np.ndarray(1, dtype=np.float64, buffer=shm_c_in.buf) + raw_shmem_input[:] = testvec[0] + shm_c_out = shared_memory.SharedMemory("/umbridge_test_shmem_out_" + str(tid), create=True, size=8) + raw_shmem_output = np.ndarray(1, dtype=np.float64, buffer=shm_c_out.buf) + response = requests.post(f"{self.url}/TestShMem", json=input).json() + result = [] + result.append(raw_shmem_output.tolist()[0]) + shm_c_in.close() + shm_c_in.unlink() + shm_c_out.close() + shm_c_out.unlink() + + if(result[0] != testvec[0]): + self.__supports_evaluate_shmem = False + self.__supports_gradient_shmem = False + self.__supports_apply_jacobian_shmem = False + self.__supports_apply_hessian_shmem = False + print("Server not accessible via shared memory") + else: + print("Server accessible via shared memory") + def get_input_sizes(self, config={}): input = {} @@ -80,6 +112,18 @@ def supports_apply_jacobian(self): def supports_apply_hessian(self): return self.__supports_apply_hessian + def supports_evaluate_shmem(self): + return self.__supports_evaluate_shmem + + def supports_gradient_shmem(self): + return self.__supports_gradient_shmem + + def supports_apply_jacobian_shmem(self): + return self.__supports_apply_jacobian_shmem + + def supports_apply_hessian_shmem(self): + return self.__supports_apply_hessian_shmem + def __check_input_is_list_of_lists(self,parameters): if not isinstance(parameters, list): raise Exception("Parameters must be a list of lists!") @@ -90,72 +134,230 @@ def __call__(self, parameters, config={}): if not self.supports_evaluate(): raise Exception('Evaluation not supported by model!') self.__check_input_is_list_of_lists(parameters) - - inputParams = {} - inputParams["name"] = self.name - inputParams["input"] = parameters - inputParams["config"] = config - response = 
requests.post(f"{self.url}/Evaluate", json=inputParams).json() - - if "error" in response: - raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') - return response["output"] - + if(self.supports_evaluate_shmem()): + tid = threading.get_native_id() + inputParams = {} + inputParams["tid"] = str(tid) + inputParams["name"] = self.name + inputParams["config"] = config + inputParams["shmem_name"] = "/umbridge" + inputParams["shmem_num_inputs"] = len(parameters) + buffers = [] + + for i in range(len(parameters)): + inputParams["shmem_size_" + str(i)] = len(parameters[i]) + shm_c_in = shared_memory.SharedMemory(inputParams["shmem_name"] + "_in_" + str(tid) + f"_{i}", create=True, size=len(parameters[i])*8) + raw_shmem_input = np.ndarray((len(parameters[i]),), dtype=np.float64, buffer=shm_c_in.buf) + raw_shmem_input[:] = parameters[i] + buffers.append(shm_c_in) + output_sizes = self.get_output_sizes(config) + + for i in range(len(output_sizes)): + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{i}", create=True, size=output_sizes[i]*8) + raw_shmem_input = np.ndarray((output_sizes[i],), dtype=np.float64, buffer=shm_c_in.buf) + buffers.append(shm_c_out) + response = requests.post(f"{self.url}/EvaluateShMem", json=inputParams).json() + output = [] + for i in range(len(output_sizes)): + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{i}", create=False, size=output_sizes[i]*8) + raw_shmem_output = np.ndarray((output_sizes[i],), dtype=np.float64, buffer=shm_c_out.buf) + output.append(raw_shmem_output.tolist()) + + for buffer in buffers: + buffer.close() + buffer.unlink() + + if response is not None and "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return output + + else: + + inputParams = {} + inputParams["name"] = self.name + inputParams["input"] = 
parameters + inputParams["config"] = config + response = requests.post(f"{self.url}/Evaluate", json=inputParams).json() + + if "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return response["output"] + def gradient(self, out_wrt, in_wrt, parameters, sens, config={}): if not self.supports_gradient(): raise Exception('Gradient not supported by model!') self.__check_input_is_list_of_lists(parameters) - - inputParams = {} - inputParams["name"] = self.name - inputParams["outWrt"] = out_wrt - inputParams["inWrt"] = in_wrt - inputParams["input"] = parameters - inputParams["sens"] = sens - inputParams["config"] = config - response = requests.post(f"{self.url}/Gradient", json=inputParams).json() - - if "error" in response: - raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') - return response["output"] + if(self.supports_gradient_shmem()): + tid = threading.get_native_id() + inputParams = {} + inputParams["tid"] = str(tid) + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + inputParams["inWrt"] = in_wrt + inputParams["sens"] = sens + inputParams["config"] = config + inputParams["shmem_name"] = "/umbridge" + inputParams["shmem_num_inputs"] = len(parameters) + buffers = [] + + for i in range(len(parameters)): + inputParams["shmem_size_" + str(i)] = len(parameters[i]) + shm_c_in = shared_memory.SharedMemory(inputParams["shmem_name"] + "_in_" + str(tid) + f"_{i}", create=True, size=len(parameters[i])*8) + raw_shmem_input = np.ndarray((len(parameters[i]),), dtype=np.float64, buffer=shm_c_in.buf) + raw_shmem_input[:] = parameters[i] + buffers.append(shm_c_in) + + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=True, size=len(parameters[in_wrt])*8) + raw_shmem_input = np.ndarray((len(parameters[in_wrt]),), dtype=np.float64, buffer=shm_c_in.buf) + 
buffers.append(shm_c_out) + response = requests.post(f"{self.url}/GradientShMem", json=inputParams).json() + + output = [] + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=False, size=len(parameters[in_wrt])*8) + raw_shmem_output = np.ndarray((len(parameters[in_wrt]),), dtype=np.float64, buffer=shm_c_out.buf) + output = raw_shmem_output.tolist() + for buffer in buffers: + buffer.close() + buffer.unlink() + + if response is not None and "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return output + + else: + + inputParams = {} + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + inputParams["inWrt"] = in_wrt + inputParams["input"] = parameters + inputParams["sens"] = sens + inputParams["config"] = config + response = requests.post(f"{self.url}/Gradient", json=inputParams).json() + + if "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return response["output"] def apply_jacobian(self, out_wrt, in_wrt, parameters, vec, config={}): if not self.supports_apply_jacobian(): raise Exception('ApplyJacobian not supported by model!') self.__check_input_is_list_of_lists(parameters) - - inputParams = {} - inputParams["name"] = self.name - inputParams["outWrt"] = out_wrt - inputParams["inWrt"] = in_wrt - inputParams["input"] = parameters - inputParams["vec"] = vec - inputParams["config"] = config - response = requests.post(f"{self.url}/ApplyJacobian", json=inputParams).json() - - if "error" in response: - raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') - return response["output"] + if(self.supports_apply_jacobian_shmem()): + tid = threading.get_native_id() + inputParams = {} + inputParams["tid"] = str(tid) + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + 
inputParams["inWrt"] = in_wrt + inputParams["vec"] = vec + inputParams["config"] = config + inputParams["shmem_name"] = "/umbridge" + inputParams["shmem_num_inputs"] = len(parameters) + buffers = [] + + for i in range(len(parameters)): + inputParams["shmem_size_" + str(i)] = len(parameters[i]) + shm_c_in = shared_memory.SharedMemory(inputParams["shmem_name"] + "_in_" + str(tid) + f"_{i}" , create=True, size=len(parameters[i])*8) + raw_shmem_input = np.ndarray((len(parameters[i]),), dtype=np.float64, buffer=shm_c_in.buf) + raw_shmem_input[:] = parameters[i] + buffers.append(shm_c_in) + + output_sizes = self.get_output_sizes(config) + + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=True, size=output_sizes[out_wrt]*8) + raw_shmem_input = np.ndarray((output_sizes[out_wrt],), dtype=np.float64, buffer=shm_c_in.buf) + buffers.append(shm_c_out) + + response = requests.post(f"{self.url}/ApplyJacobianShMem", json=inputParams).json() + + output = [] + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=False, size=output_sizes[out_wrt]*8) + raw_shmem_output = np.ndarray((output_sizes[out_wrt],), dtype=np.float64, buffer=shm_c_out.buf) + output = raw_shmem_output.tolist() + for buffer in buffers: + buffer.close() + buffer.unlink() + if response is not None and "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return output + + else: + + inputParams = {} + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + inputParams["inWrt"] = in_wrt + inputParams["input"] = parameters + inputParams["vec"] = vec + inputParams["config"] = config + response = requests.post(f"{self.url}/ApplyJacobian", json=inputParams).json() + + if "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return 
response["output"] def apply_hessian(self, out_wrt, in_wrt1, in_wrt2, parameters, sens, vec, config={}): if not self.supports_apply_hessian(): raise Exception('ApplyHessian not supported by model!') self.__check_input_is_list_of_lists(parameters) - - inputParams = {} - inputParams["name"] = self.name - inputParams["outWrt"] = out_wrt - inputParams["inWrt1"] = in_wrt1 - inputParams["inWrt2"] = in_wrt2 - inputParams["input"] = parameters - inputParams["sens"] = sens - inputParams["vec"] = vec - inputParams["config"] = config - response = requests.post(f"{self.url}/ApplyHessian", json=inputParams).json() - - if "error" in response: - raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') - return response["output"] + if(self.supports_apply_hessian_shmem()): + tid = threading.get_native_id() + inputParams = {} + inputParams["tid"] = str(tid) + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + inputParams["inWrt1"] = in_wrt1 + inputParams["inWrt2"] = in_wrt2 + inputParams["sens"] = sens + inputParams["vec"] = vec + inputParams["config"] = config + inputParams["shmem_name"] = "/umbridge" + inputParams["shmem_num_inputs"] = len(parameters) + buffers = [] + + for i in range(len(parameters)): + inputParams["shmem_size_" + str(i)] = len(parameters[i]) + shm_c_in = shared_memory.SharedMemory(inputParams["shmem_name"] + "_in_" + str(tid) + f"_{i}", create=True, size=len(parameters[i])*8) + raw_shmem_input = np.ndarray((len(parameters[i]),), dtype=np.float64, buffer=shm_c_in.buf) + raw_shmem_input[:] = parameters[i] + buffers.append(shm_c_in) + + output_sizes = self.get_output_sizes(config) + + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=True, size=output_sizes[out_wrt]*8) + raw_shmem_input = np.ndarray((output_sizes[out_wrt],), dtype=np.float64, buffer=shm_c_in.buf) + buffers.append(shm_c_out) + + response = 
requests.post(f"{self.url}/ApplyHessianShMem", json=inputParams).json() + + output = [] + shm_c_out = shared_memory.SharedMemory(inputParams["shmem_name"] + "_out_" + str(tid) + f"_{0}", create=False, size=output_sizes[out_wrt]*8) + raw_shmem_output = np.ndarray((output_sizes[out_wrt],), dtype=np.float64, buffer=shm_c_out.buf) + output = raw_shmem_output.tolist() + for buffer in buffers: + buffer.close() + buffer.unlink() + if response is not None and "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return output + + else: + + inputParams = {} + inputParams["name"] = self.name + inputParams["outWrt"] = out_wrt + inputParams["inWrt1"] = in_wrt1 + inputParams["inWrt2"] = in_wrt2 + inputParams["input"] = parameters + inputParams["sens"] = sens + inputParams["vec"] = vec + inputParams["config"] = config + response = requests.post(f"{self.url}/ApplyHessian", json=inputParams).json() + + if "error" in response: + raise Exception(f'Model returned error of type {response["error"]["type"]}: {response["error"]["message"]}') + return response["output"] def serve_models(models, port=4242, max_workers=1): @@ -184,7 +386,6 @@ def get_model_from_name(name): @routes.post('/Evaluate') async def evaluate(request): - req_json = await request.json() model_name = req_json["name"] model = get_model_from_name(model_name) @@ -225,7 +426,6 @@ async def evaluate(request): @routes.post('/EvaluateShMem') async def evaluate(request): - req_json = await request.json() model_name = req_json["name"] model = get_model_from_name(model_name) @@ -240,11 +440,10 @@ async def evaluate(request): parameters = [] for i in range(req_json["shmem_num_inputs"]): - shm_c = shared_memory.SharedMemory(req_json["shmem_name"] + f"_in_{i}", False, req_json[f"shmem_size_{i}"]) - raw_shmem_parameter = np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c.buf) + shm_c_in = 
shared_memory.SharedMemory(req_json["shmem_name"] + "_in_" + str(req_json["tid"]) + f"_{i}", False, req_json[f"shmem_size_{i}"]) + raw_shmem_parameter = np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c_in.buf) parameters.append(raw_shmem_parameter.tolist()) - shm_c.close() - shm_c.unlink() + shm_c_in.close() # Check if parameter dimensions match model input sizes if len(parameters) != len(model.get_input_sizes(config)): @@ -271,10 +470,10 @@ async def evaluate(request): # Write output to shared memory for i in range(len(output)): - shm_c = shared_memory.SharedMemory(req_json["shmem_name"] + f"_out_{i}", create=False, size=len(output[i])*8) - raw_shmem_output = np.ndarray((len(output[i]),), dtype=np.float64, buffer=shm_c.buf) + shm_c_out = shared_memory.SharedMemory(req_json["shmem_name"] + "_out_" + str(req_json["tid"]) + f"_{i}", create=False, size=len(output[i])*8) + raw_shmem_output = np.ndarray((len(output[i]),), dtype=np.float64, buffer=shm_c_out.buf) raw_shmem_output[:] = output[i] - shm_c.close() + shm_c_out.close() return web.Response(text="{}") @@ -326,6 +525,65 @@ async def gradient(request): return web.Response(text=f"{{\"output\": {output} }}") + + @routes.post('/GradientShMem') + async def gradient(request): + req_json = await request.json() + model_name = req_json["name"] + model = get_model_from_name(model_name) + if model is None: + return model_not_found_response(req_json["name"]) + if not model.supports_gradient(): + return error_response("UnsupportedFeature", "Gradient not supported by model!", 400) + + out_wrt = req_json["outWrt"] + in_wrt = req_json["inWrt"] + sens = req_json["sens"] + config = {} + if "config" in req_json: + config = req_json["config"] + parameters = [] + for i in range(req_json["shmem_num_inputs"]): + shm_c_in = shared_memory.SharedMemory(req_json["shmem_name"] + "_in_" + str(req_json["tid"]) + f"_{i}", False, req_json[f"shmem_size_{i}"]) + raw_shmem_parameter = 
np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c_in.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c_in.close() + + # Check if parameter dimensions match model input sizes + if len(parameters) != len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Number of input parameters does not match model number of model inputs!", 400) + for i in range(len(parameters)): + if len(parameters[i]) != model.get_input_sizes(config)[i]: + return error_response("InvalidInput", f"Input parameter {i} has invalid length! Expected {model.get_input_sizes(config)[i]} but got {len(parameters[i])}.", 400) + # Check if outWrt is not between zero and number of outputs + if out_wrt < 0 or out_wrt >= len(model.get_output_sizes(config)): + return error_response("InvalidInput", "Invalid outWrt index! Expected between 0 and number of outputs minus one, but got " + str(out_wrt), 400) + # Check if inWrt is between zero and number of inputs + if in_wrt < 0 or in_wrt >= len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Invalid inWrt index! Expected between 0 and number of inputs minus one, but got " + str(in_wrt), 400) + # Check if sensitivity vector length matches model output outWrt + if len(sens) != model.get_output_sizes(config)[out_wrt]: + return error_response("InvalidInput", f"Sensitivity vector sens has invalid length! Expected {model.get_output_sizes(config)[out_wrt]} but got {len(sens)}.", 400) + + output_future = model_executor.submit(model.gradient, out_wrt, in_wrt, parameters, sens, config) + output = await asyncio.wrap_future(output_future) + + # Check if output is a list + if not isinstance(output, list): + return error_response("InvalidOutput", "Model output is not a list!", 500) + + # Check if output dimension matches model input size inWrt + if len(output) != model.get_input_sizes(config)[in_wrt]: + return error_response("InvalidOutput", f"Output vector has invalid length! 
Model declared {model.get_input_sizes(config)[in_wrt]} but returned {len(output)}.", 500) + + # Write output to shared memory + shm_c_out = shared_memory.SharedMemory(req_json["shmem_name"] + "_out_" + str(req_json["tid"]) + f"_{0}", create=False, size=len(output)*8) + raw_shmem_output = np.ndarray((len(output),), dtype=np.float64, buffer=shm_c_out.buf) + raw_shmem_output[:] = output + shm_c_out.close() + + return web.Response(text="{}") + @routes.post('/ApplyJacobian') async def applyjacobian(request): @@ -374,6 +632,64 @@ async def applyjacobian(request): return web.Response(text=f"{{\"output\": {output} }}") + @routes.post('/ApplyJacobianShMem') + async def applyjacobian(request): + req_json = await request.json() + model_name = req_json["name"] + model = get_model_from_name(model_name) + if model is None: + return model_not_found_response(req_json["name"]) + if not model.supports_apply_jacobian(): + return error_response("UnsupportedFeature", "ApplyJacobian not supported by model!", 400) + + out_wrt = req_json["outWrt"] + in_wrt = req_json["inWrt"] + parameters = [] + for i in range(req_json["shmem_num_inputs"]): + shm_c_in = shared_memory.SharedMemory(req_json["shmem_name"] + "_in_" + str(req_json["tid"]) + f"_{i}", False, req_json[f"shmem_size_{i}"]) + raw_shmem_parameter = np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c_in.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c_in.close() + vec = req_json["vec"] + config = {} + if "config" in req_json: + config = req_json["config"] + + # Check if parameter dimensions match model input sizes + if len(parameters) != len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Number of input parameters does not match model number of model inputs!", 400) + for i in range(len(parameters)): + if len(parameters[i]) != model.get_input_sizes(config)[i]: + return error_response("InvalidInput", f"Input parameter {i} has invalid length! 
Expected {model.get_input_sizes(config)[i]} but got {len(parameters[i])}.", 400) + # Check if outWrt is not between zero and number of outputs + if out_wrt < 0 or out_wrt >= len(model.get_output_sizes(config)): + return error_response("InvalidInput", "Invalid outWrt index! Expected between 0 and number of outputs minus one, but got " + str(out_wrt), 400) + # Check if inWrt is between zero and number of inputs + if in_wrt < 0 or in_wrt >= len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Invalid inWrt index! Expected between 0 and number of inputs minus one, but got " + str(in_wrt), 400) + # Check if vector length matches model input inWrt + if len(vec) != model.get_input_sizes(config)[in_wrt]: + return error_response("InvalidInput", f"Vector vec has invalid length! Expected {model.get_input_sizes(config)[in_wrt]} but got {len(vec)}.", 400) + + output_future = model_executor.submit(model.apply_jacobian, out_wrt, in_wrt, parameters, vec, config) + output = await asyncio.wrap_future(output_future) + + # Check if output is a list + if not isinstance(output, list): + return error_response("InvalidOutput", "Model output is not a list!", 500) + + # Check if output dimension matches model output size outWrt + if len(output) != model.get_output_sizes(config)[out_wrt]: + return error_response("InvalidOutput", f"Output vector has invalid length! 
Model declared {model.get_output_sizes(config)[out_wrt]} but returned {len(output)}.", 500) + + # Write output to shared memory + shm_c_out = shared_memory.SharedMemory(req_json["shmem_name"] + "_out_" + str(req_json["tid"]) + f"_{0}", create=False, size=len(output)*8) + raw_shmem_output = np.ndarray((len(output),), dtype=np.float64, buffer=shm_c_out.buf) + raw_shmem_output[:] = output + shm_c_out.close() + + return web.Response(text="{}") + @routes.post('/ApplyHessian') async def applyhessian(request): @@ -421,8 +737,68 @@ async def applyhessian(request): # Check if output dimension matches model output size outWrt if len(output) != model.get_output_sizes(config)[out_wrt]: return error_response("InvalidOutput", f"Output vector has invalid length! Model declared {model.get_output_sizes(config)[out_wrt]} but returned {len(output)}.", 500) - + return web.Response(text=f"{{\"output\": {output} }}") + + @routes.post('/ApplyHessianShMem') + async def applyhessian(request): + req_json = await request.json() + model_name = req_json["name"] + model = get_model_from_name(model_name) + if model is None: + return model_not_found_response(req_json["name"]) + if not model.supports_apply_hessian(): + return error_response("UnsupportedFeature", "ApplyHessian not supported by model!", 400) + + out_wrt = req_json["outWrt"] + in_wrt1 = req_json["inWrt1"] + in_wrt2 = req_json["inWrt2"] + parameters = [] + for i in range(req_json["shmem_num_inputs"]): + shm_c_in = shared_memory.SharedMemory(req_json["shmem_name"] + "_in_" + str(req_json["tid"]) + f"_{i}", False, req_json[f"shmem_size_{i}"]) + raw_shmem_parameter = np.ndarray((req_json[f"shmem_size_{i}"],), dtype=np.float64, buffer=shm_c_in.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c_in.close() + sens = req_json["sens"] + vec = req_json["vec"] + config = {} + if "config" in req_json: + config = req_json["config"] + + # Check if parameter dimensions match model input sizes + if len(parameters) != 
len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Number of input parameters does not match model number of model inputs!", 400) + for i in range(len(parameters)): + if len(parameters[i]) != model.get_input_sizes(config)[i]: + return error_response("InvalidInput", f"Input parameter {i} has invalid length! Expected {model.get_input_sizes(config)[i]} but got {len(parameters[i])}.", 400) + # Check if outWrt is not between zero and number of outputs + if out_wrt < 0 or out_wrt >= len(model.get_output_sizes(config)): + return error_response("InvalidInput", "Invalid outWrt index! Expected between 0 and number of outputs minus one, but got " + str(out_wrt), 400) + # Check if inWrt is between zero and number of inputs + if in_wrt1 < 0 or in_wrt1 >= len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Invalid inWrt1 index! Expected between 0 and number of inputs minus one, but got " + str(in_wrt1), 400) + # Check if inWrt is between zero and number of inputs + if in_wrt2 < 0 or in_wrt2 >= len(model.get_input_sizes(config)): + return error_response("InvalidInput", "Invalid inWrt2 index! Expected between 0 and number of inputs minus one, but got " + str(in_wrt2), 400) + + output_future = model_executor.submit(model.apply_hessian, out_wrt, in_wrt1, in_wrt2, parameters, sens, vec, config) + output = await asyncio.wrap_future(output_future) + + # Check if output is a list + if not isinstance(output, list): + return error_response("InvalidOutput", "Model output is not a list!", 500) + + # Check if output dimension matches model output size outWrt + if len(output) != model.get_output_sizes(config)[out_wrt]: + return error_response("InvalidOutput", f"Output vector has invalid length! 
Model declared {model.get_output_sizes(config)[out_wrt]} but returned {len(output)}.", 500) + + # Write output to shared memory + shm_c_out = shared_memory.SharedMemory(req_json["shmem_name"] + "_out_" + str(req_json["tid"]) + f"_{0}", create=False, size=len(output)*8) + raw_shmem_output = np.ndarray((len(output),), dtype=np.float64, buffer=shm_c_out.buf) + raw_shmem_output[:] = output + shm_c_out.close() + + return web.Response(text="{}") @routes.post('/InputSizes') async def get_input_sizes(request): @@ -461,11 +837,36 @@ async def modelinfo(request): response_body["support"]["Evaluate"] = model.supports_evaluate() response_body["support"]["EvaluateShMem"] = model.supports_evaluate() response_body["support"]["Gradient"] = model.supports_gradient() + response_body["support"]["GradientShMem"] = model.supports_gradient() response_body["support"]["ApplyJacobian"] = model.supports_apply_jacobian() + response_body["support"]["ApplyJacobianShMem"] = model.supports_apply_jacobian() response_body["support"]["ApplyHessian"] = model.supports_apply_hessian() + response_body["support"]["ApplyHessianShMem"] = model.supports_apply_hessian() return web.json_response(response_body) + @routes.post('/TestShMem') + async def test_shmem(request): + req_json = await request.json() + model_name = req_json["name"] + model = get_model_from_name(model_name) + if model is None: + return model_not_found_response(req_json["name"]) + + parameters = [] + shm_c_in = shared_memory.SharedMemory("/umbridge_test_shmem_in_" + str(req_json["tid"]), False, 8) + raw_shmem_parameter = np.ndarray(1, dtype=np.float64, buffer=shm_c_in.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c_in.close() + + shm_c_out = shared_memory.SharedMemory("/umbridge_test_shmem_out_" + str(req_json["tid"]), create=False, size=8) + raw_shmem_output = np.ndarray(1, dtype=np.float64, buffer=shm_c_out.buf) + raw_shmem_output[:] = parameters[0] + shm_c_out.close() + response_body= {} + response_body["value"] = 
parameters[0] + return web.json_response(response_body) + @routes.get('/Info') async def info(request): response_body = {} From 6ba59116a5abf099b1468f58f2214477e478610a Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 25 Mar 2024 00:29:34 +0100 Subject: [PATCH 04/21] Update umbridge.h --- lib/umbridge.h | 454 +++++++++++++++++-------------------------------- 1 file changed, 158 insertions(+), 296 deletions(-) diff --git a/lib/umbridge.h b/lib/umbridge.h index 076f1ae4..c7cc3003 100644 --- a/lib/umbridge.h +++ b/lib/umbridge.h @@ -2,11 +2,14 @@ #define UMBRIDGE // Only enable shared memory functionality on Linux as it supports POSIX standard (Apple Mac probably too, needs testing of shared memory and pthread_self). -// TO-DO?: Future support for Windows will require an implementation using WinAPI considering different behaviour than POSIX. +// TO-DO?: Future support for Windows will require a shared memory vector implementation using WinAPI considering different behaviour than POSIX. #if defined __linux__ #define SUPPORT_POSIX_SHMEM #endif - +#ifdef SUPPORT_POSIX_SHMEM +#include +#include +#endif // #define LOGGING // Increase timeout to allow for long-running models. 
@@ -16,10 +19,6 @@ #include #include -#ifdef SUPPORT_POSIX_SHMEM -#include -#include -#endif #include "json.hpp" #include "httplib.h" @@ -38,56 +37,37 @@ namespace umbridge virtual std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const = 0; virtual std::vector> Evaluate(const std::vector> &inputs, - json config_json = json::parse("{}")) - { - (void)inputs; - (void)config_json; // Avoid unused argument warnings + json config_json = json::parse("{}")) { + (void)inputs; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Evaluate was called, but not implemented by model!"); } virtual std::vector Gradient(unsigned int outWrt, - unsigned int inWrt, - const std::vector> &inputs, - const std::vector &sens, - json config_json = json::parse("{}")) - { - (void)outWrt; - (void)inWrt; - (void)inputs; - (void)sens; - (void)config_json; // Avoid unused argument warnings + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &sens, + json config_json = json::parse("{}")) { + (void)outWrt; (void)inWrt; (void)inputs; (void)sens; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Gradient was called, but not implemented by model!"); } virtual std::vector ApplyJacobian(unsigned int outWrt, - unsigned int inWrt, - const std::vector> &inputs, - const std::vector &vec, - json config_json = json::parse("{}")) - { - (void)outWrt; - (void)inWrt; - (void)inputs; - (void)vec; - (void)config_json; // Avoid unused argument warnings + unsigned int inWrt, + const std::vector> &inputs, + const std::vector &vec, + json config_json = json::parse("{}")) { + (void)outWrt; (void)inWrt; (void)inputs; (void)vec; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyJacobian was called, but not implemented by model!"); } virtual std::vector ApplyHessian(unsigned int outWrt, - unsigned int inWrt1, - unsigned int inWrt2, - const std::vector> &inputs, - const std::vector &sens, - 
const std::vector &vec, - json config_json = json::parse("{}")) - { - (void)outWrt; - (void)inWrt1; - (void)inWrt2; - (void)inputs; - (void)sens; - (void)vec; - (void)config_json; // Avoid unused argument warnings + unsigned int inWrt1, + unsigned int inWrt2, + const std::vector> &inputs, + const std::vector &sens, + const std::vector &vec, + json config_json = json::parse("{}")) { + (void)outWrt; (void)inWrt1; (void)inWrt2; (void)inputs; (void)sens; (void)vec; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyHessian was called, but not implemented by model!"); } @@ -102,40 +82,38 @@ namespace umbridge std::string name; }; - std::vector SupportedModels(std::string host, httplib::Headers headers = httplib::Headers()) - { + std::vector SupportedModels(std::string host, httplib::Headers headers = httplib::Headers()) { httplib::Client cli(host.c_str()); - if (auto res = cli.Get("/Info", headers)) - { + if (auto res = cli.Get("/Info", headers)) { json response = json::parse(res->body); if (response.value("protocolVersion", 0) != 1.0) throw std::runtime_error("Model protocol version not supported!"); return response["models"]; - } - else - { + + } else { throw std::runtime_error("GET Info failed with error type '" + to_string(res.error()) + "'"); } } + #ifdef SUPPORT_POSIX_SHMEM class SharedMemoryVector { public: SharedMemoryVector(std::size_t size, std::string shmem_name, bool create) - : length(size * sizeof(double)), shmem_name(shmem_name) - { + : length(size * sizeof(double)), shmem_name(shmem_name) { int oflags = O_RDWR; - if (create) - { + if (create) { created = true; oflags |= O_CREAT; } int fd = shm_open(shmem_name.c_str(), oflags, 0644); // Create shared memory ftruncate(fd, length); // Set size of shared memory - assert(fd > 0); + if(fd < 0){ + throw std::runtime_error("Shared Memory object could not be created or found by name"); + } ptr = (u_char *)mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); // Map shared 
memory to process close(fd); @@ -144,25 +122,21 @@ namespace umbridge } SharedMemoryVector(const std::vector &vector, std::string shmem_name) - : SharedMemoryVector(vector.size(), shmem_name, true) - { + : SharedMemoryVector(vector.size(), shmem_name, true) { SetVector(vector); } - std::vector GetVector() - { + std::vector GetVector() { std::vector vector(length / sizeof(double)); memcpy(vector.data(), ptr, length); return vector; } - void SetVector(const std::vector &vector) - { + void SetVector(const std::vector &vector) { memcpy(ptr, vector.data(), length); } - ~SharedMemoryVector() - { + ~SharedMemoryVector() { munmap(ptr, length); if (created) shm_unlink(shmem_name.c_str()); @@ -177,19 +151,17 @@ namespace umbridge #endif // Client-side Model connecting to a server for the actual evaluations etc. - class HTTPModel : public Model - { + class HTTPModel : public Model { public: + HTTPModel(std::string host, std::string name, httplib::Headers headers = httplib::Headers()) : Model(name), cli(host.c_str()), headers(headers) { // Check if requested model is available on server std::vector models = SupportedModels(host, headers); - if (std::find(models.begin(), models.end(), name) == models.end()) - { + if (std::find(models.begin(), models.end(), name) == models.end()) { std::string model_names = ""; - for (auto &m : models) - { + for (auto &m : models) { model_names += "'" + m + "' "; } throw std::runtime_error("Model " + name + " not found on server! 
Available models: " + model_names + "."); @@ -198,8 +170,7 @@ namespace umbridge json request_body; request_body["name"] = name; - if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) { json response = json::parse(res->body); json supported_features = response.at("support"); @@ -213,9 +184,7 @@ namespace umbridge supportsApplyJacobianShMem = supported_features.value("ApplyJacobianShMem", false); supportsApplyHessianShMem = supported_features.value("ApplyHessianShMem", false); #endif - } - else - { + } else { throw std::runtime_error("POST ModelInfo failed with error type '" + to_string(res.error()) + "'"); } #ifdef SUPPORT_POSIX_SHMEM @@ -227,45 +196,36 @@ namespace umbridge SharedMemoryVector shmem_output(1, "/umbridge_test_shmem_out_" + std::to_string(tid), true); auto res = cli.Post("/TestShMem", headers, request_body.dump(), "application/json"); - if (shmem_output.GetVector()[0] != testvec[0]) - { - std::cout << shmem_output.GetVector()[0] << std::endl; + if (shmem_output.GetVector()[0] != testvec[0]) { supportsEvaluateShMem = false; supportsApplyJacobianShMem = false; supportsApplyHessianShMem = false; supportsGradientShMem = false; std::cout << "Server not accessible via shared memory" << std::endl; - } - else - { + } else { std::cout << "Server accessible via shared memory" << std::endl; } #endif } - std::vector GetInputSizes(const json &config_json = json::parse("{}")) const override - { + std::vector GetInputSizes(const json &config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) { json response_body = 
parse_result_with_error_handling(res); std::vector outputvec = response_body["inputSizes"].get>(); return outputvec; - } - else - { + } else { throw std::runtime_error("POST InputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const override - { + std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; @@ -277,29 +237,23 @@ namespace umbridge json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["outputSizes"].get>(); return outputvec; - } - else - { + } else { throw std::runtime_error("POST OutputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector> Evaluate(const std::vector> &inputs, json config_json = json::parse("{}")) override - { + std::vector> Evaluate(const std::vector> &inputs, json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM - if (supportsEvaluateShMem) - { + if (supportsEvaluateShMem) { unsigned int tid = pthread_self(); std::vector> shmem_inputs; - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); } std::vector> shmem_outputs; std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) - for (int i = 0; i < output_sizes.size(); i++) - { + for (int i = 0; i < output_sizes.size(); i++) { shmem_outputs.push_back(std::make_unique(output_sizes[i], "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(i), true)); } @@ -309,50 +263,39 @@ namespace umbridge request_body["config"] = config_json; request_body["shmem_name"] = "/umbridge"; request_body["shmem_num_inputs"] = inputs.size(); - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - if (auto res = cli.Post("/EvaluateShMem", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/EvaluateShMem", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector> outputs(output_sizes.size()); - for (int i = 0; i < output_sizes.size(); i++) - { + for (int i = 0; i < output_sizes.size(); i++) { outputs[i] = shmem_outputs[i]->GetVector(); } return outputs; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } - } - else - { + } else { #endif json request_body; request_body["name"] = name; - for (std::size_t i = 0; i < inputs.size(); i++) - { + + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["config"] = config_json; - if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector> outputs(response_body["output"].size()); - for (std::size_t i = 0; i < response_body["output"].size(); i++) - { + for (std::size_t i = 0; i < response_body["output"].size(); i++) { outputs[i] = 
response_body["output"][i].get>(); } return outputs; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } #ifdef SUPPORT_POSIX_SHMEM @@ -366,13 +309,12 @@ namespace umbridge const std::vector &sens, json config_json = json::parse("{}")) override { + #ifdef SUPPORT_POSIX_SHMEM - if (supportsGradientShMem) - { + if (supportsGradientShMem) { unsigned int tid = pthread_self(); std::vector> shmem_inputs; - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); } SharedMemoryVector shmem_output(inputs[inWrt].size(), "/umbridge_out_" + std::to_string(tid) + "_" + std::to_string(0), true); @@ -386,45 +328,35 @@ namespace umbridge request_body["shmem_name"] = "/umbridge"; request_body["sens"] = sens; request_body["shmem_num_inputs"] = inputs.size(); - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - if (auto res = cli.Post("/GradientShMem", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/GradientShMem", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector output(inputs[inWrt].size()); output = shmem_output.GetVector(); return output; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } - } - else - { + } else { #endif json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["sens"] = sens; request_body["config"] = config_json; - if (auto res = 
cli.Post("/Gradient", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/Gradient", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST Gradient failed with error type '" + to_string(res.error()) + "'"); } #ifdef SUPPORT_POSIX_SHMEM @@ -436,15 +368,13 @@ namespace umbridge unsigned int inWrt, const std::vector> &inputs, const std::vector &vec, - json config_json = json::parse("{}")) override - { + json config_json = json::parse("{}")) override { + #ifdef SUPPORT_POSIX_SHMEM - if (supportsApplyJacobianShMem) - { + if (supportsApplyJacobianShMem) { unsigned int tid = pthread_self(); std::vector> shmem_inputs; - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); } std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. 
share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) @@ -459,45 +389,35 @@ namespace umbridge request_body["vec"] = vec; request_body["shmem_name"] = "/umbridge"; request_body["shmem_num_inputs"] = inputs.size(); - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - if (auto res = cli.Post("/ApplyJacobianShMem", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyJacobianShMem", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector output(output_sizes[outWrt]); output = shmem_output.GetVector(); return output; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } - } - else - { + } else { #endif json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["vec"] = vec; request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST ApplyJacobian failed with error type '" + to_string(res.error()) + "'"); } #ifdef SUPPORT_POSIX_SHMEM @@ -511,15 +431,13 @@ namespace umbridge const std::vector> &inputs, const std::vector &sens, const std::vector &vec, - json config_json = json::parse("{}")) override - { + json config_json = json::parse("{}")) override { + #ifdef SUPPORT_POSIX_SHMEM - if 
(supportsApplyHessianShMem) - { + if (supportsApplyHessianShMem) { unsigned int tid = pthread_self(); std::vector> shmem_inputs; - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { shmem_inputs.push_back(std::make_unique(inputs[i], "/umbridge_in_" + std::to_string(tid) + "_" + std::to_string(i))); } std::vector output_sizes = GetOutputSizes(config_json); // Potential optimization: Avoid this call (e.g. share output memory with appropriate dimension from server side, sync with client via POSIX semaphore) @@ -537,47 +455,37 @@ namespace umbridge request_body["sens"] = sens; request_body["vec"] = vec; request_body["shmem_num_inputs"] = inputs.size(); - for (int i = 0; i < inputs.size(); i++) - { + for (int i = 0; i < inputs.size(); i++) { request_body["shmem_size_" + std::to_string(i)] = inputs[i].size(); } - if (auto res = cli.Post("/ApplyHessianShMem", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyHessianShMem", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector output(output_sizes[outWrt]); output = shmem_output.GetVector(); return output; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } - } - else - { + } else { #endif json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt1"] = inWrt1; request_body["inWrt2"] = inWrt2; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["sens"] = sens; request_body["vec"] = vec; request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) { json response_body = 
parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST ApplyHessian failed with error type '" + to_string(res.error()) + "'"); } #ifdef SUPPORT_POSIX_SHMEM @@ -585,20 +493,16 @@ namespace umbridge #endif } - bool SupportsEvaluate() override - { + bool SupportsEvaluate() override { return supportsEvaluate; } - bool SupportsGradient() override - { + bool SupportsGradient() override { return supportsGradient; } - bool SupportsApplyJacobian() override - { + bool SupportsApplyJacobian() override { return supportsApplyJacobian; } - bool SupportsApplyHessian() override - { + bool SupportsApplyHessian() override { return supportsApplyHessian; } @@ -615,19 +519,14 @@ namespace umbridge bool supportsApplyJacobianShMem = false; bool supportsApplyHessianShMem = false; - json parse_result_with_error_handling(const httplib::Result &res) const - { + json parse_result_with_error_handling(const httplib::Result &res) const { json response_body; - try - { + try { response_body = json::parse(res->body); - } - catch (json::parse_error &e) - { + } catch (json::parse_error &e) { throw std::runtime_error("Response JSON could not be parsed. 
Response body: '" + res->body + "'"); } - if (response_body.find("error") != response_body.end()) - { + if (response_body.find("error") != response_body.end()) { throw std::runtime_error("Model server returned error of type " + response_body["error"]["type"].get() + ", message: " + response_body["error"]["message"].get()); } return response_body; @@ -635,10 +534,8 @@ namespace umbridge }; // Check if inputs dimensions match model's expected input size and return error in httplib response - bool check_input_sizes(const std::vector> &inputs, const json &config_json, const Model &model, httplib::Response &res) - { - if (inputs.size() != model.GetInputSizes(config_json).size()) - { + bool check_input_sizes(const std::vector> &inputs, const json &config_json, const Model &model, httplib::Response &res) { + if (inputs.size() != model.GetInputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Number of inputs does not match number of model inputs. Expected " + std::to_string(model.GetInputSizes(config_json).size()) + " but got " + std::to_string(inputs.size()); @@ -646,10 +543,8 @@ namespace umbridge res.status = 400; return false; } - for (std::size_t i = 0; i < inputs.size(); i++) - { - if (inputs[i].size() != model.GetInputSizes(config_json)[i]) - { + for (std::size_t i = 0; i < inputs.size(); i++) { + if (inputs[i].size() != model.GetInputSizes(config_json)[i]) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input size mismatch! 
In input " + std::to_string(i) + " model expected size " + std::to_string(model.GetInputSizes(config_json)[i]) + " but got " + std::to_string(inputs[i].size()); @@ -662,10 +557,8 @@ namespace umbridge } // Check if sensitivity vector's dimension matches correct model output size and return error in httplib response - bool check_sensitivity_size(const std::vector &sens, int outWrt, const json &config_json, const Model &model, httplib::Response &res) - { - if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) - { + bool check_sensitivity_size(const std::vector &sens, int outWrt, const json &config_json, const Model &model, httplib::Response &res) { + if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Sensitivity vector size mismatch! Expected " + std::to_string(model.GetOutputSizes(config_json)[outWrt]) + " but got " + std::to_string(sens.size()); @@ -677,10 +570,8 @@ namespace umbridge } // Check if vector's dimension matches correct model output size and return error in httplib response - bool check_vector_size(const std::vector &vec, int inWrt, const json &config_json, const Model &model, httplib::Response &res) - { - if (vec.size() != model.GetInputSizes(config_json)[inWrt]) - { + bool check_vector_size(const std::vector &vec, int inWrt, const json &config_json, const Model &model, httplib::Response &res) { + if (vec.size() != model.GetInputSizes(config_json)[inWrt]) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Vector size mismatch! 
Expected " + std::to_string(model.GetInputSizes(config_json)[inWrt]) + " but got " + std::to_string(vec.size()); @@ -692,10 +583,8 @@ namespace umbridge } // Check if outputs dimensions match model's expected output size and return error in httplib response - bool check_output_sizes(const std::vector> &outputs, const json &config_json, const Model &model, httplib::Response &res) - { - if (outputs.size() != model.GetOutputSizes(config_json).size()) - { + bool check_output_sizes(const std::vector> &outputs, const json &config_json, const Model &model, httplib::Response &res) { + if (outputs.size() != model.GetOutputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidOutput"; response_body["error"]["message"] = "Number of outputs declared by model does not match number of outputs returned by model. Model declared " + std::to_string(model.GetOutputSizes(config_json).size()) + " but returned " + std::to_string(outputs.size()); @@ -703,10 +592,8 @@ namespace umbridge res.status = 500; return false; } - for (std::size_t i = 0; i < outputs.size(); i++) - { - if (outputs[i].size() != model.GetOutputSizes(config_json)[i]) - { + for (std::size_t i = 0; i < outputs.size(); i++) { + if (outputs[i].size() != model.GetOutputSizes(config_json)[i]) { json response_body; response_body["error"]["type"] = "InvalidOutput"; response_body["error"]["message"] = "Output size mismatch! 
In output " + std::to_string(i) + " model declared size " + std::to_string(model.GetOutputSizes(config_json)[i]) + " but returned " + std::to_string(outputs[i].size()); @@ -719,10 +606,8 @@ namespace umbridge } // Check if inWrt is between zero and model's input size inWrt and return error in httplib response - bool check_input_wrt(int inWrt, const json &config_json, const Model &model, httplib::Response &res) - { - if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) - { + bool check_input_wrt(int inWrt, const json &config_json, const Model &model, httplib::Response &res) { + if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input inWrt out of range! Expected between 0 and " + std::to_string(model.GetInputSizes(config_json).size() - 1) + " but got " + std::to_string(inWrt); @@ -734,10 +619,8 @@ namespace umbridge } // Check if outWrt is between zero and model's output size outWrt and return error in httplib response - bool check_output_wrt(int outWrt, const json &config_json, const Model &model, httplib::Response &res) - { - if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) - { + bool check_output_wrt(int outWrt, const json &config_json, const Model &model, httplib::Response &res) { + if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input outWrt out of range! 
Expected between 0 and " + std::to_string(model.GetOutputSizes(config_json).size() - 1) + " but got " + std::to_string(outWrt); @@ -749,8 +632,7 @@ namespace umbridge } // Construct response for unsupported feature - void write_unsupported_feature_response(httplib::Response &res, std::string feature) - { + void write_unsupported_feature_response(httplib::Response &res, std::string feature) { json response_body; response_body["error"]["type"] = "UnsupportedFeature"; response_body["error"]["message"] = "Feature '" + feature + "' is not supported by this model"; @@ -760,18 +642,14 @@ namespace umbridge // log request - void log_request(const httplib::Request &req, const httplib::Response &res) - { + void log_request(const httplib::Request &req, const httplib::Response &res) { std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; } // Get model from name - Model &get_model_from_name(std::vector &models, std::string name) - { - for (auto &model : models) - { - if (model->GetName() == name) - { + Model &get_model_from_name(std::vector &models, std::string name) { + for (auto &model : models) { + if (model->GetName() == name) { return *model; } } @@ -779,14 +657,10 @@ namespace umbridge } // Check if model exists and return error in httplib response - bool check_model_exists(std::vector &models, std::string name, httplib::Response &res) - { - try - { + bool check_model_exists(std::vector &models, std::string name, httplib::Response &res) { + try { get_model_from_name(models, name); - } - catch (std::runtime_error &e) - { + } catch (std::runtime_error &e) { json response_body; response_body["error"]["type"] = "ModelNotFound"; response_body["error"]["message"] = "Model '" + name + "' not supported by this server!"; @@ -798,14 +672,12 @@ namespace umbridge } // Provides access to a model via network - void serveModels(std::vector models, std::string host, int port) - { + void 
serveModels(std::vector models, std::string host, int port) { httplib::Server svr; std::mutex model_mutex; // Ensure the underlying model is only called sequentially - svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -840,8 +712,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #ifdef SUPPORT_POSIX_SHMEM - svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -881,8 +752,7 @@ namespace umbridge json response_body; res.set_content(response_body.dump(), "application/json"); }); #endif - svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -923,8 +793,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #ifdef SUPPORT_POSIX_SHMEM - svr.Post("/GradientShMem", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/GradientShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -969,8 +838,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #endif - svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) { json request_body = 
json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1011,8 +879,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #ifdef SUPPORT_POSIX_SHMEM - svr.Post("/ApplyJacobianShMem", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ApplyJacobianShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1055,8 +922,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #endif - svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1101,8 +967,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #ifdef SUPPORT_POSIX_SHMEM - svr.Post("/ApplyHessianShMem", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ApplyHessianShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1149,8 +1014,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #endif - svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) - { + svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) { json response_body; response_body["protocolVersion"] = 1.0; std::vector model_names; @@ -1161,8 +1025,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); - svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) { json request_body = 
json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1180,8 +1043,7 @@ namespace umbridge response_body["support"]["ApplyHessianShMem"] = model.SupportsApplyHessian(); res.set_content(response_body.dump(), "application/json"); }); - svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1195,8 +1057,7 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); - svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); if (!check_model_exists(models, request_body["name"], res)) return; @@ -1210,25 +1071,26 @@ namespace umbridge res.set_content(response_body.dump(), "application/json"); }); #ifdef SUPPORT_POSIX_SHMEM - svr.Post("/TestShMem", [&](const httplib::Request &req, httplib::Response &res) - { - json request_body = json::parse(req.body); - if (!check_model_exists(models, request_body["name"], res)) - return; - Model &model = get_model_from_name(models, request_body["name"]); - SharedMemoryVector shmem_input(1, "/umbridge_test_shmem_in_" + request_body["tid"].get(), false); - SharedMemoryVector shmem_output(1, "/umbridge_test_shmem_out_" + request_body["tid"].get(), false); - std::vector value = shmem_input.GetVector(); - shmem_output.SetVector(value); - json response_body; - response_body["value"] = value; - res.set_content(response_body.dump(), "application/json"); }); + svr.Post("/TestShMem", [&](const httplib::Request &req, httplib::Response &res) { + json request_body = json::parse(req.body); + if (!check_model_exists(models, request_body["name"], res)) + return; + Model &model = get_model_from_name(models, 
request_body["name"]); + json response_body; + try { + SharedMemoryVector shmem_input(1, "/umbridge_test_shmem_in_" + request_body["tid"].get(), false); + SharedMemoryVector shmem_output(1, "/umbridge_test_shmem_out_" + request_body["tid"].get(), false); + std::vector value = shmem_input.GetVector(); + shmem_output.SetVector(value); + response_body["value"] = value; + } + catch(std::exception){} + res.set_content(response_body.dump(), "application/json"); }); #endif std::cout << "Listening on port " << port << "..." << std::endl; #ifdef LOGGING - svr.set_logger([](const httplib::Request &req, const httplib::Response &res) - { + svr.set_logger([](const httplib::Request &req, const httplib::Response &res) { if (res.status >= 500) { std::cerr << "[ERROR] "; } else if (res.status >= 400) { From 97cc23a29ea92858e7f001c7a3ae14558af05be9 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 25 Mar 2024 00:30:18 +0100 Subject: [PATCH 05/21] Update um.py --- umbridge/um.py | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/umbridge/um.py b/umbridge/um.py index ee5e78d1..5dbb8b97 100755 --- a/umbridge/um.py +++ b/umbridge/um.py @@ -852,19 +852,21 @@ async def test_shmem(request): model = get_model_from_name(model_name) if model is None: return model_not_found_response(req_json["name"]) - - parameters = [] - shm_c_in = shared_memory.SharedMemory("/umbridge_test_shmem_in_" + str(req_json["tid"]), False, 8) - raw_shmem_parameter = np.ndarray(1, dtype=np.float64, buffer=shm_c_in.buf) - parameters.append(raw_shmem_parameter.tolist()) - shm_c_in.close() - - shm_c_out = shared_memory.SharedMemory("/umbridge_test_shmem_out_" + str(req_json["tid"]), create=False, size=8) - raw_shmem_output = np.ndarray(1, dtype=np.float64, buffer=shm_c_out.buf) - raw_shmem_output[:] = parameters[0] - shm_c_out.close() response_body= {} - response_body["value"] = parameters[0] + try:#in case the test fails, 
FileNotFoundError will be thrown + parameters = [] + shm_c_in = shared_memory.SharedMemory("/umbridge_test_shmem_in_" + str(req_json["tid"]), False, 8) + raw_shmem_parameter = np.ndarray(1, dtype=np.float64, buffer=shm_c_in.buf) + parameters.append(raw_shmem_parameter.tolist()) + shm_c_in.close() + + shm_c_out = shared_memory.SharedMemory("/umbridge_test_shmem_out_" + str(req_json["tid"]), create=False, size=8) + raw_shmem_output = np.ndarray(1, dtype=np.float64, buffer=shm_c_out.buf) + raw_shmem_output[:] = parameters[0] + shm_c_out.close() + response_body["value"] = parameters[0] + except: + pass return web.json_response(response_body) @routes.get('/Info') From 1cf54180b8d854d7f4b6d49b980dfdc44260c767 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 25 Mar 2024 00:44:25 +0100 Subject: [PATCH 06/21] Update umbridge.h Fix indentation for comparison --- lib/umbridge.h | 76 +++++++++++++++++++++++++------------------------- 1 file changed, 38 insertions(+), 38 deletions(-) diff --git a/lib/umbridge.h b/lib/umbridge.h index c7cc3003..6e380202 100644 --- a/lib/umbridge.h +++ b/lib/umbridge.h @@ -33,10 +33,10 @@ namespace umbridge public: Model(std::string name) : name(name) {} - virtual std::vector GetInputSizes(const json &config_json = json::parse("{}")) const = 0; - virtual std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const = 0; + virtual std::vector GetInputSizes(const json& config_json = json::parse("{}")) const = 0; + virtual std::vector GetOutputSizes(const json& config_json = json::parse("{}")) const = 0; - virtual std::vector> Evaluate(const std::vector> &inputs, + virtual std::vector> Evaluate(const std::vector>& inputs, json config_json = json::parse("{}")) { (void)inputs; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Evaluate was called, but not implemented by model!"); @@ -44,8 +44,8 @@ namespace umbridge virtual std::vector Gradient(unsigned int 
outWrt, unsigned int inWrt, - const std::vector> &inputs, - const std::vector &sens, + const std::vector>& inputs, + const std::vector& sens, json config_json = json::parse("{}")) { (void)outWrt; (void)inWrt; (void)inputs; (void)sens; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("Gradient was called, but not implemented by model!"); @@ -53,8 +53,8 @@ namespace umbridge virtual std::vector ApplyJacobian(unsigned int outWrt, unsigned int inWrt, - const std::vector> &inputs, - const std::vector &vec, + const std::vector>& inputs, + const std::vector& vec, json config_json = json::parse("{}")) { (void)outWrt; (void)inWrt; (void)inputs; (void)vec; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyJacobian was called, but not implemented by model!"); @@ -63,9 +63,9 @@ namespace umbridge virtual std::vector ApplyHessian(unsigned int outWrt, unsigned int inWrt1, unsigned int inWrt2, - const std::vector> &inputs, - const std::vector &sens, - const std::vector &vec, + const std::vector>& inputs, + const std::vector& sens, + const std::vector& vec, json config_json = json::parse("{}")) { (void)outWrt; (void)inWrt1; (void)inWrt2; (void)inputs; (void)sens; (void)vec; (void)config_json; // Avoid unused argument warnings throw std::runtime_error("ApplyHessian was called, but not implemented by model!"); @@ -121,7 +121,7 @@ namespace umbridge assert(ptr); } - SharedMemoryVector(const std::vector &vector, std::string shmem_name) + SharedMemoryVector(const std::vector& vector, std::string shmem_name) : SharedMemoryVector(vector.size(), shmem_name, true) { SetVector(vector); } @@ -132,7 +132,7 @@ namespace umbridge return vector; } - void SetVector(const std::vector &vector) { + void SetVector(const std::vector& vector) { memcpy(ptr, vector.data(), length); } @@ -161,7 +161,7 @@ namespace umbridge std::vector models = SupportedModels(host, headers); if (std::find(models.begin(), models.end(), name) == models.end()) { 
std::string model_names = ""; - for (auto &m : models) { + for (auto& m : models) { model_names += "'" + m + "' "; } throw std::runtime_error("Model " + name + " not found on server! Available models: " + model_names + "."); @@ -208,7 +208,7 @@ namespace umbridge #endif } - std::vector GetInputSizes(const json &config_json = json::parse("{}")) const override { + std::vector GetInputSizes(const json& config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; @@ -225,7 +225,7 @@ namespace umbridge } } - std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const override { + std::vector GetOutputSizes(const json& config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; @@ -243,7 +243,7 @@ namespace umbridge } } - std::vector> Evaluate(const std::vector> &inputs, json config_json = json::parse("{}")) override { + std::vector> Evaluate(const std::vector>& inputs, json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM if (supportsEvaluateShMem) { unsigned int tid = pthread_self(); @@ -305,8 +305,8 @@ namespace umbridge std::vector Gradient(unsigned int outWrt, unsigned int inWrt, - const std::vector> &inputs, - const std::vector &sens, + const std::vector>& inputs, + const std::vector& sens, json config_json = json::parse("{}")) override { @@ -366,8 +366,8 @@ namespace umbridge std::vector ApplyJacobian(unsigned int outWrt, unsigned int inWrt, - const std::vector> &inputs, - const std::vector &vec, + const std::vector>& inputs, + const std::vector& vec, json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM @@ -428,9 +428,9 @@ namespace umbridge std::vector ApplyHessian(unsigned int outWrt, unsigned int inWrt1, unsigned int inWrt2, - const std::vector> &inputs, - const std::vector &sens, - const std::vector &vec, + const std::vector>& inputs, + const std::vector& sens, + const std::vector& vec, json config_json = json::parse("{}")) 
override { #ifdef SUPPORT_POSIX_SHMEM @@ -519,11 +519,11 @@ namespace umbridge bool supportsApplyJacobianShMem = false; bool supportsApplyHessianShMem = false; - json parse_result_with_error_handling(const httplib::Result &res) const { + json parse_result_with_error_handling(const httplib::Result& res) const { json response_body; try { response_body = json::parse(res->body); - } catch (json::parse_error &e) { + } catch (json::parse_error& e) { throw std::runtime_error("Response JSON could not be parsed. Response body: '" + res->body + "'"); } if (response_body.find("error") != response_body.end()) { @@ -534,7 +534,7 @@ namespace umbridge }; // Check if inputs dimensions match model's expected input size and return error in httplib response - bool check_input_sizes(const std::vector> &inputs, const json &config_json, const Model &model, httplib::Response &res) { + bool check_input_sizes(const std::vector>& inputs, const json& config_json, const Model& model, httplib::Response& res) { if (inputs.size() != model.GetInputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; @@ -557,7 +557,7 @@ namespace umbridge } // Check if sensitivity vector's dimension matches correct model output size and return error in httplib response - bool check_sensitivity_size(const std::vector &sens, int outWrt, const json &config_json, const Model &model, httplib::Response &res) { + bool check_sensitivity_size(const std::vector& sens, int outWrt, const json& config_json, const Model& model, httplib::Response& res) { if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) { json response_body; response_body["error"]["type"] = "InvalidInput"; @@ -570,7 +570,7 @@ namespace umbridge } // Check if vector's dimension matches correct model output size and return error in httplib response - bool check_vector_size(const std::vector &vec, int inWrt, const json &config_json, const Model &model, httplib::Response &res) { + bool check_vector_size(const 
std::vector& vec, int inWrt, const json& config_json, const Model& model, httplib::Response& res) { if (vec.size() != model.GetInputSizes(config_json)[inWrt]) { json response_body; response_body["error"]["type"] = "InvalidInput"; @@ -583,7 +583,7 @@ namespace umbridge } // Check if outputs dimensions match model's expected output size and return error in httplib response - bool check_output_sizes(const std::vector> &outputs, const json &config_json, const Model &model, httplib::Response &res) { + bool check_output_sizes(const std::vector>& outputs, const json& config_json, const Model& model, httplib::Response& res) { if (outputs.size() != model.GetOutputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidOutput"; @@ -606,7 +606,7 @@ namespace umbridge } // Check if inWrt is between zero and model's input size inWrt and return error in httplib response - bool check_input_wrt(int inWrt, const json &config_json, const Model &model, httplib::Response &res) { + bool check_input_wrt(int inWrt, const json& config_json, const Model& model, httplib::Response& res) { if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; @@ -619,7 +619,7 @@ namespace umbridge } // Check if outWrt is between zero and model's output size outWrt and return error in httplib response - bool check_output_wrt(int outWrt, const json &config_json, const Model &model, httplib::Response &res) { + bool check_output_wrt(int outWrt, const json& config_json, const Model& model, httplib::Response& res) { if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; @@ -632,7 +632,7 @@ namespace umbridge } // Construct response for unsupported feature - void write_unsupported_feature_response(httplib::Response &res, std::string feature) { + void write_unsupported_feature_response(httplib::Response& 
res, std::string feature) { json response_body; response_body["error"]["type"] = "UnsupportedFeature"; response_body["error"]["message"] = "Feature '" + feature + "' is not supported by this model"; @@ -642,13 +642,13 @@ namespace umbridge // log request - void log_request(const httplib::Request &req, const httplib::Response &res) { + void log_request(const httplib::Request& req, const httplib::Response& res) { std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; } // Get model from name - Model &get_model_from_name(std::vector &models, std::string name) { - for (auto &model : models) { + Model& get_model_from_name(std::vector& models, std::string name) { + for (auto& model : models) { if (model->GetName() == name) { return *model; } @@ -657,10 +657,10 @@ namespace umbridge } // Check if model exists and return error in httplib response - bool check_model_exists(std::vector &models, std::string name, httplib::Response &res) { + bool check_model_exists(std::vector& models, std::string name, httplib::Response& res) { try { get_model_from_name(models, name); - } catch (std::runtime_error &e) { + } catch (std::runtime_error& e) { json response_body; response_body["error"]["type"] = "ModelNotFound"; response_body["error"]["message"] = "Model '" + name + "' not supported by this server!"; @@ -672,7 +672,7 @@ namespace umbridge } // Provides access to a model via network - void serveModels(std::vector models, std::string host, int port) { + void serveModels(std::vector models, std::string host, int port) { httplib::Server svr; std::mutex model_mutex; // Ensure the underlying model is only called sequentially From 08344e788265ae721c94ab3e961f32ce09e24ab3 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 25 Mar 2024 00:58:57 +0100 Subject: [PATCH 07/21] Update umbridge.h --- lib/umbridge.h | 98 
+++++++++++++++++++++++++++----------------------- 1 file changed, 54 insertions(+), 44 deletions(-) diff --git a/lib/umbridge.h b/lib/umbridge.h index 6e380202..69183255 100644 --- a/lib/umbridge.h +++ b/lib/umbridge.h @@ -14,7 +14,7 @@ // Increase timeout to allow for long-running models. // This should be (to be on the safe side) significantly greater than the maximum time your model may take -#define CPPHTTPLIB_READ_TIMEOUT_SECOND 60 * 60 +#define CPPHTTPLIB_READ_TIMEOUT_SECOND 60*60 #include #include @@ -25,11 +25,9 @@ using json = nlohmann::json; -namespace umbridge -{ +namespace umbridge { - class Model - { + class Model { public: Model(std::string name) : name(name) {} @@ -71,12 +69,12 @@ namespace umbridge throw std::runtime_error("ApplyHessian was called, but not implemented by model!"); } - virtual bool SupportsEvaluate() { return false; } - virtual bool SupportsGradient() { return false; } - virtual bool SupportsApplyJacobian() { return false; } - virtual bool SupportsApplyHessian() { return false; } + virtual bool SupportsEvaluate() {return false;} + virtual bool SupportsGradient() {return false;} + virtual bool SupportsApplyJacobian() {return false;} + virtual bool SupportsApplyHessian() {return false;} - std::string GetName() const { return name; } + std::string GetName() const {return name;} protected: std::string name; @@ -87,7 +85,7 @@ namespace umbridge if (auto res = cli.Get("/Info", headers)) { json response = json::parse(res->body); - if (response.value("protocolVersion", 0) != 1.0) + if (response.value("protocolVersion",0) != 1.0) throw std::runtime_error("Model protocol version not supported!"); return response["models"]; @@ -155,7 +153,7 @@ namespace umbridge public: HTTPModel(std::string host, std::string name, httplib::Headers headers = httplib::Headers()) - : Model(name), cli(host.c_str()), headers(headers) + : Model(name), cli(host.c_str()), headers(headers) { // Check if requested model is available on server std::vector models = 
SupportedModels(host, headers); @@ -232,8 +230,7 @@ namespace umbridge if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["outputSizes"].get>(); return outputvec; @@ -304,10 +301,10 @@ namespace umbridge } std::vector Gradient(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& sens, - json config_json = json::parse("{}")) override + unsigned int inWrt, + const std::vector>& inputs, + const std::vector& sens, + json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM @@ -338,7 +335,7 @@ namespace umbridge output = shmem_output.GetVector(); return output; } else { - throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + throw std::runtime_error("POST Gradient failed with error type '" + to_string(res.error()) + "'"); } } else { #endif @@ -365,10 +362,10 @@ namespace umbridge } std::vector ApplyJacobian(unsigned int outWrt, - unsigned int inWrt, - const std::vector>& inputs, - const std::vector& vec, - json config_json = json::parse("{}")) override { + unsigned int inWrt, + const std::vector>& inputs, + const std::vector& vec, + json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM if (supportsApplyJacobianShMem) { @@ -399,7 +396,7 @@ namespace umbridge output = shmem_output.GetVector(); return output; } else { - throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + throw std::runtime_error("POST ApplyJacobian failed with error type '" + to_string(res.error()) + "'"); } } else { #endif @@ -426,12 +423,12 @@ namespace umbridge } std::vector ApplyHessian(unsigned int outWrt, - unsigned int inWrt1, - 
unsigned int inWrt2, - const std::vector>& inputs, - const std::vector& sens, - const std::vector& vec, - json config_json = json::parse("{}")) override { + unsigned int inWrt1, + unsigned int inWrt2, + const std::vector>& inputs, + const std::vector& sens, + const std::vector& vec, + json config_json = json::parse("{}")) override { #ifdef SUPPORT_POSIX_SHMEM if (supportsApplyHessianShMem) { @@ -465,7 +462,7 @@ namespace umbridge output = shmem_output.GetVector(); return output; } else { - throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); + throw std::runtime_error("POST ApplyHessian failed with error type '" + to_string(res.error()) + "'"); } } else { #endif @@ -507,6 +504,7 @@ namespace umbridge } private: + mutable httplib::Client cli; httplib::Headers headers; @@ -531,6 +529,7 @@ namespace umbridge } return response_body; } + }; // Check if inputs dimensions match model's expected input size and return error in httplib response @@ -643,7 +642,7 @@ namespace umbridge // log request void log_request(const httplib::Request& req, const httplib::Response& res) { - std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; + std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; } // Get model from name @@ -710,7 +709,8 @@ namespace umbridge response_body["output"][i] = outputs[i]; } - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #ifdef SUPPORT_POSIX_SHMEM svr.Post("/EvaluateShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -791,7 +791,8 @@ namespace umbridge json response_body; response_body["output"] = gradient; - res.set_content(response_body.dump(), "application/json"); }); + 
res.set_content(response_body.dump(), "application/json"); + }); #ifdef SUPPORT_POSIX_SHMEM svr.Post("/GradientShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -835,7 +836,8 @@ namespace umbridge json response_body; - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #endif svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) { @@ -965,7 +967,8 @@ namespace umbridge json response_body; response_body["output"] = hessian_action; - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #ifdef SUPPORT_POSIX_SHMEM svr.Post("/ApplyHessianShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -1012,7 +1015,8 @@ namespace umbridge json response_body; shmem_output.SetVector(hessian_action); - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #endif svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) { json response_body; @@ -1023,7 +1027,8 @@ namespace umbridge } response_body["models"] = model_names; - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -1041,7 +1046,8 @@ namespace umbridge response_body["support"]["ApplyJacobianShMem"] = model.SupportsApplyJacobian(); response_body["support"]["ApplyHessian"] = model.SupportsApplyHessian(); response_body["support"]["ApplyHessianShMem"] = model.SupportsApplyHessian(); - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); svr.Post("/InputSizes", 
[&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -1055,7 +1061,8 @@ namespace umbridge json response_body; response_body["inputSizes"] = model.GetInputSizes(config_json); - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -1069,7 +1076,8 @@ namespace umbridge json response_body; response_body["outputSizes"] = model.GetOutputSizes(config_json); - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #ifdef SUPPORT_POSIX_SHMEM svr.Post("/TestShMem", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); @@ -1085,12 +1093,13 @@ namespace umbridge response_body["value"] = value; } catch(std::exception){} - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); #endif std::cout << "Listening on port " << port << "..." 
<< std::endl; #ifdef LOGGING - svr.set_logger([](const httplib::Request &req, const httplib::Response &res) { + svr.set_logger([](const httplib::Request& req, const httplib::Response& res) { if (res.status >= 500) { std::cerr << "[ERROR] "; } else if (res.status >= 400) { @@ -1098,7 +1107,8 @@ namespace umbridge } else { std::cout << "[INFO] "; } - log_request(req, res); }); + log_request(req, res); + }); #endif svr.listen(host.c_str(), port); std::cout << "Quit" << std::endl; From 3d9cab3b667d11d56062ef08dc95c819e4b53071 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 11:57:32 +0200 Subject: [PATCH 08/21] Create sharedmemtest.yml --- .github/workflows/sharedmemtest.yml | 31 +++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 .github/workflows/sharedmemtest.yml diff --git a/.github/workflows/sharedmemtest.yml b/.github/workflows/sharedmemtest.yml new file mode 100644 index 00000000..3cb3822a --- /dev/null +++ b/.github/workflows/sharedmemtest.yml @@ -0,0 +1,31 @@ +name: shared-mem-test + +on: + push: + branches: + - 'sharedmem' + +jobs: + + test: + runs-on: ubuntu-latest + container: ubuntu:latest + + services: + model: + image: linusseelinger/model-exahype-tsunami + ports: + - 4242:4242 + + steps: + - + name: Checkout + uses: actions/checkout@v2 + - + name: Dependencies + run: | + apt update; DEBIAN_FRONTEND="noninteractive" apt install -y g++ libssl-dev + - + name: Build and run + run: | + cd clients/c++ && ./build.sh && ./http-client model:4242 From 48da249fab9626b0ce52b5163567ecbecbd3e018 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:03:57 +0200 Subject: [PATCH 09/21] Update sharedmemtest.yml --- .github/workflows/sharedmemtest.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/sharedmemtest.yml b/.github/workflows/sharedmemtest.yml index 3cb3822a..18f2570d 100644 --- 
a/.github/workflows/sharedmemtest.yml +++ b/.github/workflows/sharedmemtest.yml @@ -16,6 +16,7 @@ jobs: image: linusseelinger/model-exahype-tsunami ports: - 4242:4242 + options: --ipc=host steps: - From ca43533abd453e3b32f310f1f406090ce88a0662 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:11:03 +0200 Subject: [PATCH 10/21] Update sharedmemtest.yml --- .github/workflows/sharedmemtest.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest.yml b/.github/workflows/sharedmemtest.yml index 18f2570d..d989a159 100644 --- a/.github/workflows/sharedmemtest.yml +++ b/.github/workflows/sharedmemtest.yml @@ -9,7 +9,9 @@ jobs: test: runs-on: ubuntu-latest - container: ubuntu:latest + container: + image: ubuntu:latest + options: --ipc=host services: model: From a550c9e23ece72893ed0ab14af909e48712d3005 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:21:47 +0200 Subject: [PATCH 11/21] Update sharedmemtest.yml --- .github/workflows/sharedmemtest.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/sharedmemtest.yml b/.github/workflows/sharedmemtest.yml index d989a159..c034c216 100644 --- a/.github/workflows/sharedmemtest.yml +++ b/.github/workflows/sharedmemtest.yml @@ -3,15 +3,13 @@ name: shared-mem-test on: push: branches: - - 'sharedmem' + - 'main' jobs: test: runs-on: ubuntu-latest - container: - image: ubuntu:latest - options: --ipc=host + container: ubuntu:latest services: model: From d4fb88524e35cdba34bb279ac0530d74be2db87c Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:42:57 +0200 Subject: [PATCH 12/21] Update and rename sharedmemtest.yml to sharedmemtest-c++.yml --- .github/workflows/{sharedmemtest.yml => sharedmemtest-c++.yml} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename 
.github/workflows/{sharedmemtest.yml => sharedmemtest-c++.yml} (95%) diff --git a/.github/workflows/sharedmemtest.yml b/.github/workflows/sharedmemtest-c++.yml similarity index 95% rename from .github/workflows/sharedmemtest.yml rename to .github/workflows/sharedmemtest-c++.yml index c034c216..1dcc8052 100644 --- a/.github/workflows/sharedmemtest.yml +++ b/.github/workflows/sharedmemtest-c++.yml @@ -1,4 +1,4 @@ -name: shared-mem-test +name: shared-mem-test-c++ on: push: From fc559bf714c0159215a30f8af043938e3f9ce4ba Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:45:24 +0200 Subject: [PATCH 13/21] Create sharedmemtest-python --- .github/workflows/sharedmemtest-python | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/workflows/sharedmemtest-python diff --git a/.github/workflows/sharedmemtest-python b/.github/workflows/sharedmemtest-python new file mode 100644 index 00000000..fad4a3c0 --- /dev/null +++ b/.github/workflows/sharedmemtest-python @@ -0,0 +1,32 @@ +name: shared-mem-test-python + +on: + push: + branches: + - 'sharedmem' + +jobs: + + test: + runs-on: ubuntu-latest + container: ubuntu:latest + + services: + model: + image: linusseelinger/model-exahype-tsunami:latest + ports: + - 4242:4242 + options: --ipc=host + + steps: + - + name: Checkout + uses: actions/checkout@v2 + - + name: Dependencies + run: | + apt update && DEBIAN_FRONTEND="noninteractive" apt install -y python3-pip && pip3 install umbridge + - + name: Build and run + run: | + cd clients/python && python3 umbridge-client.py http://model:4242 From 125932bb26b70b4a9c2d454f9c58048e9b420524 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 12:45:50 +0200 Subject: [PATCH 14/21] Rename sharedmemtest-python to sharedmemtest-python.yml --- .../workflows/{sharedmemtest-python => sharedmemtest-python.yml} | 0 1 file changed, 0 insertions(+), 0 
deletions(-) rename .github/workflows/{sharedmemtest-python => sharedmemtest-python.yml} (100%) diff --git a/.github/workflows/sharedmemtest-python b/.github/workflows/sharedmemtest-python.yml similarity index 100% rename from .github/workflows/sharedmemtest-python rename to .github/workflows/sharedmemtest-python.yml From 34a9e51c7845cf1aa739cdf4c6bc7ad0dddd9a99 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 13:00:08 +0200 Subject: [PATCH 15/21] Update sharedmemtest-python.yml --- .github/workflows/sharedmemtest-python.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest-python.yml b/.github/workflows/sharedmemtest-python.yml index fad4a3c0..47bf73d4 100644 --- a/.github/workflows/sharedmemtest-python.yml +++ b/.github/workflows/sharedmemtest-python.yml @@ -3,7 +3,7 @@ name: shared-mem-test-python on: push: branches: - - 'sharedmem' + - 'main' jobs: From 537fde98bc07b7f235c1afd837a94601e8264996 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:24:41 +0200 Subject: [PATCH 16/21] Update sharedmemtest-c++.yml --- .github/workflows/sharedmemtest-c++.yml | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sharedmemtest-c++.yml b/.github/workflows/sharedmemtest-c++.yml index 1dcc8052..bd499974 100644 --- a/.github/workflows/sharedmemtest-c++.yml +++ b/.github/workflows/sharedmemtest-c++.yml @@ -1,9 +1,11 @@ name: shared-mem-test-c++ +env: + usingSharedMem: '' on: push: branches: - - 'main' + - 'sharedmem' jobs: @@ -29,4 +31,9 @@ jobs: - name: Build and run run: | - cd clients/c++ && ./build.sh && ./http-client model:4242 + cd clients/c++ && ./build.sh && checkShMem=$(./http-client model:4242 | grep -o "not accessible"); echo "CheckShMem=$checkShMem" >> $GITHUB_ENV + - + name: Check if Shared Memory is used + if: $CheckShMem == "not accessible" + run: | + 
echo "Shared Memory not available, failed"; exit 1 From dceda7d2f708098b2a6709a64474677e5ea06be0 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:31:53 +0200 Subject: [PATCH 17/21] Update sharedmemtest-c++.yml --- .github/workflows/sharedmemtest-c++.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest-c++.yml b/.github/workflows/sharedmemtest-c++.yml index bd499974..d94ce873 100644 --- a/.github/workflows/sharedmemtest-c++.yml +++ b/.github/workflows/sharedmemtest-c++.yml @@ -34,6 +34,6 @@ jobs: cd clients/c++ && ./build.sh && checkShMem=$(./http-client model:4242 | grep -o "not accessible"); echo "CheckShMem=$checkShMem" >> $GITHUB_ENV - name: Check if Shared Memory is used - if: $CheckShMem == "not accessible" + if: env.CheckShMem == "not accessible" run: | echo "Shared Memory not available, failed"; exit 1 From 05975ced1e6d6e436b8822f7049e8771f6bbf8de Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:33:18 +0200 Subject: [PATCH 18/21] Update sharedmemtest-c++.yml --- .github/workflows/sharedmemtest-c++.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest-c++.yml b/.github/workflows/sharedmemtest-c++.yml index d94ce873..2f67c66b 100644 --- a/.github/workflows/sharedmemtest-c++.yml +++ b/.github/workflows/sharedmemtest-c++.yml @@ -34,6 +34,6 @@ jobs: cd clients/c++ && ./build.sh && checkShMem=$(./http-client model:4242 | grep -o "not accessible"); echo "CheckShMem=$checkShMem" >> $GITHUB_ENV - name: Check if Shared Memory is used - if: env.CheckShMem == "not accessible" + if: env.CheckShMem == 'not accessible' run: | echo "Shared Memory not available, failed"; exit 1 From e6df0b2215eb3f48e8e33ad2ef7ca6327bd570aa Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:36:12 +0200 Subject: 
[PATCH 19/21] Update sharedmemtest-c++.yml --- .github/workflows/sharedmemtest-c++.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest-c++.yml b/.github/workflows/sharedmemtest-c++.yml index 2f67c66b..1df14f27 100644 --- a/.github/workflows/sharedmemtest-c++.yml +++ b/.github/workflows/sharedmemtest-c++.yml @@ -5,7 +5,7 @@ env: on: push: branches: - - 'sharedmem' + - 'main' jobs: From 0e77a6cde773aef84c77abdd6dc1b91787f79fc2 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:39:45 +0200 Subject: [PATCH 20/21] Update sharedmemtest-python.yml --- .github/workflows/sharedmemtest-python.yml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sharedmemtest-python.yml b/.github/workflows/sharedmemtest-python.yml index 47bf73d4..fbf60610 100644 --- a/.github/workflows/sharedmemtest-python.yml +++ b/.github/workflows/sharedmemtest-python.yml @@ -29,4 +29,10 @@ jobs: - name: Build and run run: | - cd clients/python && python3 umbridge-client.py http://model:4242 + cd clients/python && checkShMem=$(python3 umbridge-client.py http://model:4242 | grep -o "not accessible"); echo "CheckShMem=$checkShMem" >> $GITHUB_ENV + - + name: Check if Shared Memory is used + if: env.CheckShMem == 'not accessible' + run: | + echo "Shared Memory not available, failed"; exit 1 + From 87ac647491803ac94a4b74728ba7da2df22b1989 Mon Sep 17 00:00:00 2001 From: Kevin Heibel <57466458+Vogull@users.noreply.github.com> Date: Mon, 8 Apr 2024 15:56:48 +0200 Subject: [PATCH 21/21] Update um.py --- umbridge/um.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/umbridge/um.py b/umbridge/um.py index 5dbb8b97..633f978a 100755 --- a/umbridge/um.py +++ b/umbridge/um.py @@ -68,7 +68,8 @@ def __init__(self, url, name): raw_shmem_input[:] = testvec[0] shm_c_out = shared_memory.SharedMemory("/umbridge_test_shmem_out_" + str(tid), create=True, 
size=8) raw_shmem_output = np.ndarray(1, dtype=np.float64, buffer=shm_c_out.buf) - response = requests.post(f"{self.url}/TestShMem", json=input).json() + try: response = requests.post(f"{self.url}/TestShMem", json=input).json() + except: pass result = [] result.append(raw_shmem_output.tolist()[0]) shm_c_in.close()