diff --git a/merak/testscripts/TC/alcor_http_api_test.java b/merak/testscripts/TC/alcor_http_api_test.java new file mode 100644 index 0000000..564fcc2 --- /dev/null +++ b/merak/testscripts/TC/alcor_http_api_test.java @@ -0,0 +1,593 @@ +/* +MIT License +Copyright(c) 2020 Futurewei Cloud + Permission is hereby granted, + free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons + to whom the Software is furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +/* + This is the code testing the Alcor HTTP APIs, currently support APIs: +1. createVPC +2. createSubnet +3. createPort + +Params: +1. vpm_ip +2. vpm_port +3. snm_ip +4. snm_port +5. pm_ip +6. pm_port +7. vpc_cidr_slash +8. tenant_amount +9. project_amount_per_tenant +10. vpc_amount_per_project +11. subnet_amount_per_vpc +12. test_vpc_api = true +13. test_subnet_api = true +14. test_port_api = true +15. 
call_api_rate = 100, the rate (calls per second) at which the test calls the Alcor APIs.

 the number of ports will be based on the vpc_cidr_slash and subnet_amount_per_vpc, for example, if vpc_cidr_slash
 is 8, then the network cidr becomes 10.0.0.0/8, which has 2^(32-8) IPs, and say we have subnet_amount_per_vpc = 1024,
 which is 2^10, then each subnet will have 2^(32-8-10) = 16384 ports, minus the two IPs (first and last in subnet cidr)
 reserved by Alcor.
*/

package com.futurewei.alcor.pseudo_controller.alcor_http_api_test;

import com.google.common.util.concurrent.RateLimiter;
import inet.ipaddr.AddressStringException;
import inet.ipaddr.IPAddress;
import inet.ipaddr.IPAddressSeqRange;
import inet.ipaddr.IPAddressString;
import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.message.BasicHeader;
import org.apache.http.protocol.HTTP;
import org.apache.http.util.EntityUtils;
import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.stereotype.Component;

import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Load-test driver for the Alcor HTTP APIs (createVPC / createSubnet / createPort).
 *
 * It first generates all JSON payloads in memory (tenants -> projects -> VPCs ->
 * subnets -> ports), then fires the corresponding POST calls through a fixed-size
 * thread pool, throttled by a Guava RateLimiter at {@code call_api_rate} calls/second.
 */
@Component
public class alcor_http_api_test {
    // Host/port of the VPC manager (vpm), subnet manager (snm) and port manager (pm).
    @Value("${vpm_ip:192.168.0.0}")
    String vpm_ip;
    @Value("${vpm_port:1234}")
    String vpm_port;
    @Value("${snm_ip:192.168.0.0}")
    String snm_ip;
    @Value("${snm_port:1234}")
    String snm_port;
    @Value("${pm_ip:192.168.0.0}")
    String pm_ip;
    @Value("${pm_port:1234}")
    String pm_port;
    /*
     vpc_cidr_slash, the number after the slash in the vpc CIDR, decides how big the
     VPC is, such as 10.0.0.0/16 or 10.0.0.0/8.
    */
    @Value("${vpc_cidr_slash:16}")
    int vpc_cidr_slash;
    // tenant_amount = concurrency when calling APIs.
    @Value("${tenant_amount:1}")
    int tenant_amount;
    // Each tenant can have multiple projects.
    @Value("${project_amount_per_tenant:1}")
    int project_amount_per_tenant;
    // Each project can have multiple VPCs; every VPC uses the same CIDR.
    @Value("${vpc_amount_per_project:1}")
    int vpc_amount_per_project;
    // Each VPC can have multiple subnets.
    @Value("${subnet_amount_per_vpc:2}")
    int subnet_amount_per_vpc;
    @Value("${test_vpc_api:false}")
    Boolean test_vpc_api;
    @Value("${test_subnet_api:false}")
    Boolean test_subnet_api;
    @Value("${test_port_api:false}")
    Boolean test_port_api;
    // Maximum API calls per second; also used as the worker-thread-pool size.
    @Value("${call_api_rate:1}")
    int call_api_rate;

    public alcor_http_api_test(){}

    /**
     * Generates all payloads, then exercises the createVPC, createSubnet and
     * createPort APIs according to the test_vpc_api/test_subnet_api/test_port_api
     * flags, printing a summary (finished / succeeded / elapsed ms) per API.
     */
    public void run_test_against_alcor_apis(){
        System.out.println("Beginning of alcor API test, need to generate: "
                + tenant_amount + " tenants, \n"
                + project_amount_per_tenant + " projects for each tenant, \n"
                + vpc_amount_per_project + " VPCs for each project, \n"
                + subnet_amount_per_vpc + " subnets for each VPC, \n");
        ArrayList<String> tenant_uuids = new ArrayList<>();
        SortedMap<String, ArrayList<String>> tenant_projects = new TreeMap<>();
        SortedMap<String, ArrayList<JSONObject>> project_vpcs = new TreeMap<>();
        SortedMap<String, ArrayList<JSONObject>> vpc_subnets = new TreeMap<>();
        SortedMap<String, ArrayList<JSONObject>> subnet_ports = new TreeMap<>();

        // All port IPs inside one VPC CIDR; generated only once because every VPC
        // uses the same CIDR, so the IP list is identical for all of them.
        ArrayList<String> vpc_port_ips = null;
        int subnet_port_amount = 1;
        for (int i = 0; i < tenant_amount; i++){
            String current_tenant_uuid = UUID.randomUUID().toString();
            tenant_uuids.add(current_tenant_uuid);
            ArrayList<String> current_tenant_projects = new ArrayList<>();
            for (int j = 0; j < project_amount_per_tenant; j++){
                String current_project_id = UUID.randomUUID().toString();
                current_tenant_projects.add(current_project_id);
                ArrayList<JSONObject> vpcs_inside_a_project = new ArrayList<>();
                for (int k = 0; k < vpc_amount_per_project; k++){
                    /*
                     If you set it to /8 you will get an out-of-memory error.
                     /12 gives you more than 2^20 ports in a VPC (1,048,576) without
                     causing the out-of-memory error.
                    */
                    String vpc_cidr = "10.0.0.0/" + vpc_cidr_slash;
                    String current_vpc_id = UUID.randomUUID().toString();
                    JSONObject network = new JSONObject();
                    network.put("admin_state_up", true);
                    network.put("revision_number", 0);
                    network.put("cidr", vpc_cidr);
                    network.put("default", true);
                    network.put("description", "vpc-" + k);
                    network.put("dns_domain", "test-dns-domain");
                    network.put("id", current_vpc_id);
                    network.put("is_default", true);
                    network.put("mtu", 1400);
                    network.put("name", "vpc-" + k);
                    network.put("port_security_enabled", true);
                    network.put("project_id", current_project_id);
                    JSONObject vpc_payload = new JSONObject();
                    vpc_payload.put("network", network);
                    vpcs_inside_a_project.add(vpc_payload);

                    /*
                     1. Generate all port IPs from the VPC CIDR range (once).
                     2. Divide the port IPs into subnet_amount_per_vpc groups.
                     3. Each group is a subnet; compute its CIDR and build its
                        subnet payload and port payloads.
                    */
                    if (null == vpc_port_ips){
                        try {
                            System.out.println("Need to generate port IPs for the first time.");
                            IPAddressString whole_vpc_address = new IPAddressString(vpc_cidr);
                            // toSequentialRange(): the range from the lowest to the
                            // highest address covered by the CIDR.
                            IPAddressSeqRange whole_vpc_address_range = whole_vpc_address.toSequentialRange();
                            vpc_port_ips = new ArrayList<>();
                            Iterator<? extends IPAddress> range_iterator = whole_vpc_address_range.stream().iterator();
                            while (range_iterator.hasNext()){
                                vpc_port_ips.add(range_iterator.next().toString());
                            }
                            subnet_port_amount = vpc_port_ips.size() / subnet_amount_per_vpc;
                            System.out.println("Finished generating port IPs. Each subnet should have " + subnet_port_amount + " ports");
                        } catch (AddressStringException e) {
                            e.printStackTrace();
                        }
                    }
                    // Build the subnet payloads (and, if requested, the port payloads)
                    // for this VPC.
                    if (test_subnet_api && null != vpc_port_ips){
                        ArrayList<JSONObject> current_vpc_subnets = new ArrayList<>();
                        System.out.println("Generating subnets");
                        for (int l = 0; l < subnet_amount_per_vpc; l++){
                            String subnet_start_ip = vpc_port_ips.get(l * subnet_port_amount);
                            String subnet_end_ip = vpc_port_ips.get((l * subnet_port_amount) + subnet_port_amount - 1);
                            IPAddress subnet_start_ip_address = new IPAddressString(subnet_start_ip).getAddress();
                            IPAddress subnet_end_ip_address = new IPAddressString(subnet_end_ip).getAddress();
                            IPAddressSeqRange subnet_range = subnet_start_ip_address.toSequentialRange(subnet_end_ip_address);
                            // spanWithPrefixBlocks() returns prefix blocks spanning the
                            // same set of addresses; the first block is this subnet's CIDR.
                            IPAddress[] blocks = subnet_range.spanWithPrefixBlocks();
                            String subnet_cidr = blocks[0].toString();
                            String current_subnet_id = UUID.randomUUID().toString();
                            JSONObject subnet = new JSONObject();
                            subnet.put("cidr", subnet_cidr);
                            subnet.put("id", current_subnet_id);
                            subnet.put("ip_version", 4);
                            subnet.put("network_id", current_vpc_id);
                            subnet.put("name", "subnet" + l);
                            // BUGFIX: the createSubnet URL below is built by reading
                            // project_id back out of this payload, but project_id was
                            // never put in, producing ".../project/null/subnets".
                            subnet.put("project_id", current_project_id);
                            JSONObject subnet_payload = new JSONObject();
                            subnet_payload.put("subnet", subnet);
                            current_vpc_subnets.add(subnet_payload);
                            if (test_port_api){
                                List<String> subnet_port_ips = vpc_port_ips.subList(l * subnet_port_amount,
                                        (l * subnet_port_amount) + subnet_port_amount);
                                ArrayList<JSONObject> current_subnet_ports = new ArrayList<>();
                                for (String port_ip_in_subnet : subnet_port_ips){
                                    current_subnet_ports.add(build_port_payload(port_ip_in_subnet,
                                            current_subnet_id, current_vpc_id,
                                            current_project_id, current_tenant_uuid));
                                }
                                subnet_ports.put(current_subnet_id, current_subnet_ports);
                            }
                        }
                        System.out.println("Finished generating subnets for vpc.");
                        vpc_subnets.put(current_vpc_id, current_vpc_subnets);
                    }
                }
                project_vpcs.put(current_project_id, vpcs_inside_a_project);
            }
            tenant_projects.put(current_tenant_uuid, current_tenant_projects);
        }

        System.out.println("Time to call those APIs! Calling APIs at " + call_api_rate + "/second");
        // BUGFIX: the rate limiter and thread pool were commented out while still
        // being referenced below, which made this class fail to compile.
        RateLimiter rateLimiter = RateLimiter.create(call_api_rate);
        // The pool has the same number of threads as the per-second call budget.
        ExecutorService concurrent_create_resource_thread_pool = Executors.newFixedThreadPool(call_api_rate);

        if (test_vpc_api){
            System.out.println("Time to test VPC API!");
            ArrayList<JSONObject> create_vpc_jobs = new ArrayList<>();
            for (String project_id : project_vpcs.keySet()) {
                create_vpc_jobs.addAll(project_vpcs.get(project_id));
            }
            int vpc_call_amount = create_vpc_jobs.size();
            // BUGFIX: latch/success-count/timing declarations restored (they were
            // commented out but still used below).
            CountDownLatch latch = new CountDownLatch(vpc_call_amount);
            AtomicInteger create_vpc_success_count = new AtomicInteger(0);
            long call_vpc_api_start_time = System.currentTimeMillis();
            for (JSONObject vpc_job : create_vpc_jobs) {
                rateLimiter.acquire();
                String current_project_id = (String) ((JSONObject) vpc_job.get("network")).get("project_id");
                String create_vpc_url = "http://" + vpm_ip + ":" + vpm_port + "/project/" + current_project_id + "/vpcs";
                concurrent_create_resource_thread_pool.execute(() -> {
                    JSONObject create_vpc_response = call_post_api_with_json(create_vpc_url, vpc_job);
                    if (null != create_vpc_response && create_vpc_response.containsKey("network")){
                        create_vpc_success_count.incrementAndGet();
                    }
                    latch.countDown();
                });
            }
            try {
                if (test_subnet_api || test_port_api){
                    // Subnet/port tests depend on the VPCs existing, so wait for all calls.
                    latch.await(600, TimeUnit.SECONDS);
                } else {
                    // All calls were already submitted; one second covers the tail.
                    latch.await(1, TimeUnit.SECONDS);
                }
                long call_vpc_api_end_time = System.currentTimeMillis();
                System.out.println("Called createVPC API " + vpc_call_amount
                        + " times, finished " + (vpc_call_amount - latch.getCount())
                        + " times, succeeded " + create_vpc_success_count.get()
                        + " times, at the rate of " + call_api_rate + "/second, it took "
                        + (call_vpc_api_end_time - call_vpc_api_start_time) + " milliseconds");
            } catch (InterruptedException e) {
                // Re-interrupt so callers can observe the interruption.
                Thread.currentThread().interrupt();
                System.err.println("Interrupted while waiting for createVPC responses!");
                e.printStackTrace();
            }
        }

        if (test_subnet_api){
            System.out.println("Time to test subnet API!");
            ArrayList<JSONObject> create_subnet_jobs = new ArrayList<>();
            for (String vpc_id : vpc_subnets.keySet()) {
                create_subnet_jobs.addAll(vpc_subnets.get(vpc_id));
            }
            int subnet_call_amount = create_subnet_jobs.size();
            CountDownLatch latch = new CountDownLatch(subnet_call_amount);
            AtomicInteger create_subnet_success_count = new AtomicInteger(0);
            long call_subnet_api_start_time = System.currentTimeMillis();
            for (JSONObject subnet_job : create_subnet_jobs) {
                rateLimiter.acquire();
                String current_project_id = (String) ((JSONObject) subnet_job.get("subnet")).get("project_id");
                String create_subnet_url = "http://" + snm_ip + ":" + snm_port + "/project/" + current_project_id + "/subnets";
                concurrent_create_resource_thread_pool.execute(() -> {
                    JSONObject create_subnet_response = call_post_api_with_json(create_subnet_url, subnet_job);
                    if (null != create_subnet_response && create_subnet_response.containsKey("subnet")){
                        create_subnet_success_count.incrementAndGet();
                    }
                    latch.countDown();
                });
            }
            try {
                if (test_port_api){
                    // The port test depends on the subnets existing, so wait for all calls.
                    latch.await(600, TimeUnit.SECONDS);
                } else {
                    latch.await(1, TimeUnit.SECONDS);
                }
                long call_subnet_api_end_time = System.currentTimeMillis();
                System.out.println("Called createSubnet API " + subnet_call_amount
                        + " times, finished " + (subnet_call_amount - latch.getCount())
                        + " times, succeeded " + create_subnet_success_count.get()
                        + " times, at the rate of " + call_api_rate + "/second, it took "
                        + (call_subnet_api_end_time - call_subnet_api_start_time) + " milliseconds");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                System.err.println("Interrupted while waiting for createSubnet responses!");
                e.printStackTrace();
            }
        }

        if (test_port_api){
            System.out.println("Time to test port API!");
            ArrayList<JSONObject> create_port_jobs = new ArrayList<>();
            for (String subnet_id : subnet_ports.keySet()) {
                ArrayList<JSONObject> current_subnet_ports = subnet_ports.get(subnet_id);
                // Drop the first and last IP of each subnet; Alcor reserves them.
                if (current_subnet_ports.size() > 2){
                    current_subnet_ports.remove(current_subnet_ports.size() - 1);
                    current_subnet_ports.remove(0);
                }
                create_port_jobs.addAll(current_subnet_ports);
            }
            int port_call_amount = create_port_jobs.size();
            CountDownLatch latch = new CountDownLatch(port_call_amount);
            AtomicInteger create_port_success_count = new AtomicInteger(0);
            long call_port_api_start_time = System.currentTimeMillis();
            for (JSONObject port_job : create_port_jobs) {
                rateLimiter.acquire();
                String current_project_id = (String) ((JSONObject) port_job.get("port")).get("project_id");
                String create_port_url = "http://" + pm_ip + ":" + pm_port + "/project/" + current_project_id + "/ports";
                concurrent_create_resource_thread_pool.execute(() -> {
                    JSONObject create_port_response = call_post_api_with_json(create_port_url, port_job);
                    if (null != create_port_response && create_port_response.containsKey("port")){
                        create_port_success_count.incrementAndGet();
                    }
                    latch.countDown();
                });
            }
            try {
                // All calls were already submitted; one second covers the tail.
                latch.await(1, TimeUnit.SECONDS);
                long call_port_api_end_time = System.currentTimeMillis();
                System.out.println("Called createPort API " + port_call_amount
                        + " times, finished " + (port_call_amount - latch.getCount())
                        + " times, succeeded " + create_port_success_count.get()
                        + " times, at the rate of " + call_api_rate + "/second, it took "
                        + (call_port_api_end_time - call_port_api_start_time) + " milliseconds");
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                System.err.println("Interrupted while waiting for createPort responses!");
                e.printStackTrace();
            }
        }
        // Release the worker threads; without this the JVM may not exit.
        concurrent_create_resource_thread_pool.shutdown();
    }

    /**
     * Builds the JSON payload for one createPort call.
     *
     * NOTE(review): the key "securi_enabled" is kept byte-for-byte from the original
     * payload; it looks like a typo of "port_security_enabled" — confirm against the
     * Alcor port API schema before changing it.
     */
    private JSONObject build_port_payload(String port_ip, String subnet_id, String vpc_id,
                                          String project_id, String tenant_id){
        JSONObject port = new JSONObject();
        port.put("admin_state_up", true);
        port.put("description", "test_port");
        port.put("device_id", "test_device_id");
        port.put("device_owner", "compute:nova");
        port.put("fast_path", true);
        JSONObject subnet_fixed_ip = new JSONObject();
        subnet_fixed_ip.put("ip_address", port_ip);
        subnet_fixed_ip.put("subnet_id", subnet_id);
        JSONArray fixed_ips = new JSONArray();
        fixed_ips.add(subnet_fixed_ip);
        port.put("fixed_ips", fixed_ips);
        port.put("id", UUID.randomUUID().toString());
        port.put("mac_learning_enabled", true);
        port.put("network_id", vpc_id);
        port.put("securi_enabled", true);
        port.put("project_id", project_id);
        port.put("revision_number", 0);
        port.put("tenant_id", tenant_id);
        port.put("uplink_status_propagation", true);
        JSONObject port_payload = new JSONObject();
        port_payload.put("port", port);
        return port_payload;
    }

    /**
     * POSTs the given JSON payload to the given URL and parses the response body
     * as JSON.
     *
     * @param url       full URL of the Alcor endpoint
     * @param parameter JSON payload to send as the request body
     * @return the parsed response JSON, or null if the request or parsing failed
     */
    private JSONObject call_post_api_with_json(String url, JSONObject parameter){
        JSONObject response_json = null;
        HttpClient c = new DefaultHttpClient();
        HttpPost post = new HttpPost(url);
        post.setHeader("Content-Type", "application/json");
        try {
            StringEntity s = new StringEntity(parameter.toString(), "utf-8");
            s.setContentEncoding(new BasicHeader(HTTP.CONTENT_TYPE, "application/json"));
            post.setEntity(s);
            HttpResponse httpResponse = c.execute(post);
            HttpEntity response_entity = httpResponse.getEntity();
            String json_string = EntityUtils.toString(response_entity);
            response_json = (JSONObject) new JSONParser().parse(json_string);
        } catch (Exception e){
            // Best-effort: a failed call is reported as null so the caller's
            // success counters simply do not increment.
            e.printStackTrace();
        }
        return response_json;
    }

}
+ +### Whether to test NCM, or to test Alcor HTTP APIs ### +test_against_ncm = false + +### Test Controller NCM test Params ### +number_of_vpcs = 10 +ports_node_one = 100 +ports_node_two = 200 +node_one_ip = 127.0.0.1 +node_two_ip = 127.0.0.2 +ncm_ip = 127.0.0.3 +ncm_port = 321 +user_name = ubuntu +password = gfedcba +### 0 is concurrent, others are sequential ### +ping_mode = 1 +### 1 to do background ping, otherwise NOT to do background ping ### +background_ping = 1 +### 0 to create container and ping, otherwise NOT to create container and NOT to ping ### +create_container_and_ping = 1 + +### Test Controller Alcor HTTP APIs Test Params ### +vpm_ip = 127.0.0.1 +vpm_port = 8080 +snm_ip = 127.0.0.1 +snm_port = 8081 +pm_ip = 127.0.0.1 +pm_port = 8081 +vpc_cidr_slash = 8 +tenant_amount = 1 +project_amount_per_tenant = 1 +vpc_amount_per_project = 1 +subnet_amount_per_vpc = 1 +test_vpc_api = true +test_subnet_api = true +test_port_api = true +call_api_rate = 100 + +#####Spring health##### +management.health.redis.enabled=false \ No newline at end of file diff --git a/merak/testscripts/jinkins/README.md b/merak/testscripts/jinkins/README.md new file mode 100644 index 0000000..aba78cc --- /dev/null +++ b/merak/testscripts/jinkins/README.md @@ -0,0 +1,179 @@ +# MIT License +``` +Copyright(c) 2020 Futurewei Cloud +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files(the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +``` + +# Busybox Ping Test + +# In this README: +- [MIT License](#mit-license) +- [Busybox Ping Test](#busybox-ping-test) +- [In this README:](#in-this-readme) + - [Introduction](#introduction) + - [Key Features of Test Script](#key-features-of-test-script) + - [Directory Structure and Files](#directory-structure-and-files) + - [Test Configuration](#test-configuration) + - [Example](#example) + - [Preconditions](#preconditions) + - [Running Test](#running-test) + - [After Test Starts](#after-test-starts) + - [ACA on target hosts](#aca-on-target-hosts) + - [Troubleshooting](#troubleshooting) + - [Quick Start](#quick-start) + +## Introduction +This is an end to end test where we test two Busybox containers, hosted on same or different hosts, ping each other.The connectivity between the two Busybox containers is provided by Alcor services and Alcor Control Agent. + +This is a test document based on the K6 script rewritten by busybox Ping test + +## Key Features of Test Script + - Builds Alcor and docker images for all alcor services (optional). + - Stops and removes existing alcor services and starts the alcor services afresh. + - Stops any currently running ACA on target computers where Busybox containers are to be deployed. + - Clears any existing busybox containers on the target hosts. + - Checks if the Alcor Control Agents are running on the target hosts. + - Prepares the testcase using provided test payload in the configuration file and generate the goal state. 
+ - Deploys two busybox containers on the target hosts and assigns the IP/MAC obtained from the goal state generated in previous step. + - Runs a ping command from one container to another. + +## Directory Structure and Files +alcor/scripts/test-automation +1. alcor_services.ini +2. ping_test.py +3. helper_functions.py +4. container_ops.py +5. prepare_payload.py +6. create_test_setup.py +7. create_test_cases.py + +## Test Configuration +Test configuration should be presented in the file alcor_services.ini. Configuration file has following sections: +1. [services]: Carries the list of alcor service folders, service names and the ports they use. +2. [AlcorControlAgents]: New line separated list of hosts over which ACA is running and on which Busybox containers will be deployed. +3. [test_info]: Carries the data necessary for creating the end goal states. +4. [vpc_info], [node_info], [subnet_info], [security_groups], [port_info]: These carry the test payload that is needed to generate the end goal state. + +## Example +You can configure the alcor services name, port number in the following way: +[services] +1. ignite = {"name":"ignite", "port":10800, "path":"/lib/ignite",Dockerfile"} +2. vpc_manager = {"name":"vpm", "port":9001} +3. subnet_manager = {"name":"snm", "port":9002} + +With the above configuration the ignite service will be run with the name 'ignite', +The vpc_manager is built from the Dockerfile located in services/vpc_manager folder and the container with the name vpm port 9001 is started. + +## Preconditions +Ensure that your target hosts, over which you plan to deploy the Busybox containers +1. Have Alcor Control Agent binaries located at /home/ubuntu/repos/aca/build/bin/AlcorControlAgent + +## Running Test +The main file for running the test is ping_test.py. 
It is a python script that can be run from command prompt in either of the following two ways:
+```
+python3 busybox_ping_test.py
+./busybox_ping_test.py
+```
+
+You can optionally provide the parameter "-b build" to build all the docker images of the alcor services. This step need not be followed for any subsequent tests, unless any changes are made in Alcor.
+
+## After Test Starts
+1. It will stop, remove existing Alcor services (if present) and start them all (as listed in alcor_services.ini file)
+2. Checks the target hosts if any Alcor Control Agent (ACA) is running. If yes, it is killed and ACA restarted.
+3. Checks whether the ACAs are running on the targets. If found not running, the test stops.
+4. Using the test info and payload provided in config file, generate the end goal states for two end nodes.
+5. Deploys two busybox containers con1 and con2 on the target hosts and runs a ping command from one container to another.
+
+## ACA on target hosts
+1. Following packages are required to build and run ACA. Install the following packages on target hosts. Though not mentioned below, installing these packages will require sudo permissions.
+```
+openvswitch-switch
+openvswitch-common
+apache-pulsar-client
+apache-pulsar-client-dev
+```
+
+2. The library 'openvswitch' is also required. This library can only be installed from source. Get a clone of this library from github and checkout the 2.12 branch.
+```
+https://github.com/openvswitch/ovs.git
+```
+Install the following packages before building ovs
+```
+make
+autoconf
+libtool
+c-ares
+```
+Now go to the ovs source and update the file 'configure.ac' and edit the line carrying LT_INIT to enable shared library creation before building:
+* LT_INIT (enable_shared)
+```
+./configure --prefix=/usr --localstatedir=/var --sysconfdir=/etc
+make
+make install
+```
+
+3.
After the successful installation of ovs, start the following services:
+```
+sudo systemctl restart openvswitch-switch
+sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
+```
+The script ovs-ctl starts the services ovsdb-server and ovs-vswitchd.
+
+4. If ACA or ovs services throw bridge related errors, clear the existing bridges for any given container on target hosts. The test script takes care of these itself. However, if you ever manually try to start ACA, the following commands can be used to clear existing bridges.
+```
+ ovs-vsctl del-br br-tun
+ ovs-vsctl del-br br-int
+ ovs-docker del-ports br-int
+```
+
+5. Following commands can be used to diagnose the target node's ovs services and bridges:
+```
+ovs-vsctl show
+ovs-ofctl dump-flows br-tun
+ovs-ofctl dump-flows br-int
+```
+
+## Troubleshooting
+1) During the running of the test script, the user account 'ubuntu' from the Alcor host will be making ssh connections to the target hosts. Ensure that user ubuntu has passwordless ssh access to the target hosts. Copy the contents of the id_rsa.pub file of user 'ubuntu' (located at ~/.ssh) and paste into the file ~/.ssh/authorized_keys on the target host.
+
+2) Often running the tests from a terminal on the Alcor hosts leaves the stdout and stdin in an unknown state. You can fix it by running
+```
+reset
+```
+While typing the reset command you will not be able to see it. But once run, the terminal is restored.
+
+3) While running the tests from Jenkins, it is essential that the jenkins user also has passwordless access to the target hosts. The easiest way to ensure that is to copy the entire ~/.ssh folder of user 'ubuntu' on to the jenkins home directory, which is usually at /var/lib/jenkins. Ensure while copying that file attributes are preserved.
+```
+cp -pr /home/ubuntu/.ssh /var/lib/jenkins
+chown -R jenkins:jenkins /var/lib/jenkins/.ssh
+```
+Go through the jenkins help file available in alcor-int repository to get additional details on running tests through jenkins.
+ +4) If the tests ever fails due to errors from Alcor API calls then observe the output from http get request from these calls. Check the configuration in alcor_services.ini and redeploy by manaully calling these APIs. + + +## Quick Start +After making the necessary configuration file changes, run the script with following paramters to get started: +1. ./ping_test.py -b build + - build the alcor services and their docker images and + - runs the simple test case of two containers under same subnet and security group pinging each other. +2. ./ping_test.py -t 1 + - runs the test case of two busyboxy containers on two subnets and same security group +3. ./ping_test.py -t 2 + - runs the test case of two busybox containers on one subnet and two security groups + + + diff --git a/merak/testscripts/jinkins/alcor_services.ini b/merak/testscripts/jinkins/alcor_services.ini new file mode 100644 index 0000000..eac3427 --- /dev/null +++ b/merak/testscripts/jinkins/alcor_services.ini @@ -0,0 +1,106 @@ +[services] +ignite = {"name":"ignite", "port":10800, "path":"/lib/ignite.Dockerfile"} +# vpc_manager internal port : 9001 +vpc_manager = {"name":"vpm", "port":9009} + +# Segment handling can't be routed through API gateway but has to go +# directly to vpc_manager but other vpc related requests will have to +# go through API gateway. A segment_service is added as an alias of +# vpc_manager internal port for handling this situation. At present, ping +# test uses it to create default segment table. 
+segment_service = {"name" : "sgs", "port" : 9001} + +# subnet_manager internal port : 9002 +subnet_manager = {"name":"snm", "port":9009} + +# route_manager internal port : 9003 +route_manager = {"name":"rm", "port":9009} + +# private_ip_manager internal port : 9004 +private_ip_manager = {"name":"pim", "port":9009} + +# mac_manager (virtual mac manager) internal port : 9005 +mac_manager = {"name":"mm", "port":9009} + +# port_manager internal port : 9006 +port_manager = {"name":"pm", "port":9009} + +# This can't be routed through API GW +node_manager = {"name":"nm", "port":9007} + +# security_group_manager internal port : 9008 +security_group_manager = {"name":"sgm", "port":9009} + +api_gateway = {"name":"ag", "port":9009} + +# data_plane_manager internal port : 9010 +data_plane_manager = {"name":"dpm", "port":9009} + +# elastic_ip_manager internal port : 9011 +elastic_ip_manager = {"name":"eim", "port":9009} + +# quota_manager internal port : 9012 +quota_manager = {"name":"qm", "port":9009} + +# network_acl_manager internal port : 9013 +network_acl_manager = {"name":"nam", "port":9009} + +# network_config_manager internal port : 9014 +network_config_manager = {"name":"ncm", "port":9009} + +# gateway_manager internal port : 9015 +gateway_manager = {"name":"gm", "port":9009} + +[AlcorControlAgents] +node1 = 172.31.19.133 +node2 = 172.31.21.202 + +[test_setup] +vpc_id = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001" +project_id = "3dda2801-d675-4688-a63f-dcda8d327f50" +tenant_id = "3dda2801-d675-4688-a63f-dcda8d327f50" +network_id = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001" +cidr = "172.16.0.0/16" +node_id = ["1112a4d4-ffff-4ece-b3f0-8d36e3d85001", "1112a4d4-ffff-4ece-b3f0-8d36e3d85002"] +node_name = ["node1", "node2"] +subnet_id = "8182a4d4-ffff-4ece-b3f0-8d36e3d88001" +security_group_id = "3dda2801-d675-4688-a63f-dcda8d111111" +device_id = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"] +port_name = ["port101", "port102"] +port_id = 
["7122a4d4-ffff-5eee-b3f0-8d36e3d01101", "7122a4d4-ffff-5eee-b3f0-8d36e3d02201"] +ip_addrs = ["172.16.1.101", "172.16.1.102"] +container_names = ["con1", "con2"] + +[L3_AttachRouter_then_CreatePorts] +subnet_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d88001", "8182a4d4-ffff-4ece-b3f0-8d36e3d88002"] +cidrs = ["172.16.1.0/24", "172.16.2.0/24"] +ip_addrs = ["172.16.1.101", "172.16.2.201"] +subnet_names = ["subnet1", "subnet2"] +device_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"] + +[L2_basic] +security_group_ids =["3dda2801-d675-4688-a63f-dcda8d111111", "3dda2801-d675-4688-a63f-dcda8d111112"] +sg_names= ["sg1","sg2"] +device_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"] + +[gateways] +gateway_info = [{"gw" : "172.16.1.1" , "ips" : ["172.16.1.101", "172.16.1.102"]}, {"gw" : "172.16.2.1", "ips" : ["172.16.2.201"]}] + +[vpc_info] +vpc_info = {"cidr":${test_setup:cidr},"id":${test_setup:vpc_id}, "project_id":${test_setup:project_id}} + +[node_info] +node_info = {"node_id":${test_setup:node_id}, "node_name":${test_setup:node_name}, "server_port":8080, "veth":"eth0"} + +[subnet_info] +subnet_info = {"cidr":${test_setup:cidr},"id":${test_setup:subnet_id},"ip_version":4,"network_id":${test_setup:network_id},"name":"subnet1","host_routes":[{"destination":"172.16.1.0/24","nexthop":"172.16.1.1"}]} + +[security_groups] +security_group_info = {"create_at":"string","description":"string","id":${test_setup:security_group_id},"name":"sg1","project_id":${test_setup:project_id},"security_group_rules":[],"tenant_id":${test_setup:tenant_id},"update_at":"string"} + +[port_info] +port_info = {"binding:host_id":${test_setup:node_name},"device_id":${test_setup:device_id},"fixed_ips":${test_setup:ip_addrs},"subnet_id":${test_setup:subnet_id},"id": ${test_setup:port_id},"name": ${test_setup:port_name},"network_id": 
${test_setup:network_id},"project_id":${test_setup:project_id},"security_groups":${test_setup:security_group_id},"tenant_id":${test_setup:tenant_id}} + +[router_info] +router_info = {"name":"router1","owner":${test_setup:vpc_id},"network_id":${test_setup:network_id},"project_id":${test_setup:project_id},"security_groups":${test_setup:security_group_id},"tenant_id":${test_setup:tenant_id},"id":"11112801-d675-4688-a63f-dcda8d327f50"} + diff --git a/merak/testscripts/jinkins/build_aca.sh b/merak/testscripts/jinkins/build_aca.sh new file mode 100644 index 0000000..340beef --- /dev/null +++ b/merak/testscripts/jinkins/build_aca.sh @@ -0,0 +1,160 @@ +#! /bin/sh + +# Pull latest ACA code from specified repository, branch and commit +# and call aca_machine_init.sh to do the actual build. +# Emit success or failure so that the remote script learn about the status +# of the build. + + +GIT_REPO="" +GIT_BRANCH="" +GIT_COMMIT= +GIT_URL= +REMOTE_COMMIT= +LOCAL_COMMIT= +DO_FORCE=0 +DO_BUILD=0 + +# Get local repo/branch/commit info +git_check_current() { + LOCAL_INFO=`git log origin/$GIT_BRANCH | head -3` + LOCAL_COMMIT=`echo $LOCAL_INFO | awk '/^commit/ {print $2}'` + LOCAL_DATE=`echo $LOCAL_INFO | sed 's/^.*Date://' | awk '{print $1, $2, $3, $4, $5}'` +} + + +# Get remote repo/branch/commit info +# May need git fetch -all to be robust +git_check_remote() { + REMOTE_INFO=`git log origin/$GIT_BRANCH | head -3` + REMOTE_COMMIT=`echo $REMOTE_INFO | awk '/^commit/ {print $2}'` + REMOTE_DATE=`echo $REMOTE_INFO | sed 's/^.*Date://' | awk '{print $1, $2, $3, $4, $5}'` + + RSEC=`date --date="${REMOTE_DATE}" +%s` + LSEC=`date --date="${LOCAL_DATE}" +%s` + + DIFF=`echo $RSEC - $LSEC | bc` + if [ $DO_FORCE -eq 1 ]; then + DO_BUILD=1 + elif [ $DIFF -gt 0 ]; then + DO_BUILD=1 + fi +} + + +# Need to avoid getting stuck on stale modifications +git_reset() { + git reset --hard || { + echo "ERROR: git reset failed" + exit 1 + } + echo "Success: git reset" +} + + +# Fetch the specified repo 
+git_fetch() { + git fetch --force --tags $GIT_URL || { + echo "ERROR: git fetch failed" + exit 1 + } + + git config remote.origin.url $GIT_URL + git pull +} + + +# checkout the specified branch (add commit later). +git_checkout() { + git checkout $GIT_BRANCH || { + echo "git checkout $GIT_BRANCH failed" + exit 1 + } + + echo "Success: git checkout $GIT_BRANCH" +} + + +# Start the build on the node. +# Waits for remote builds to finish. +do_build() { + cd build + sed -e '/^[\t ]*nohup[\t ][\t ]*$BUILD\/bin\/AlcorControlAgent /d' \ + ./aca-machine-init.sh > ./aca-machine-init-jenkins.sh + chmod +x ./aca-machine-init-jenkins.sh + D1=`date +%s` + echo "Started build in `pwd`..." + rm -f ../CMakeCache.txt ../cmake_install.cmake > /dev/null 2>&1 + sudo ./aca-machine-init-jenkins.sh > /tmp/amij.log 2>&1 + D2=`date +%s` + echo "Build finished in `expr $D2 - $D1` seconds, waiting a little..." + OSZ=0 + while :; do + NSZ=`ls -l /tmp/amij.log | awk '{print $5}'` + if [ $NSZ -eq $OSZ ]; then + break + else + OSZ=$NSZ + sleep 5 + fi + done + + if fgrep "Built target AlcorControlAgent" /tmp/amij.log > /dev/null 2>&1; then + echo "Success: ACA machine init" + exit 0 + else + echo "Failure: ACA machine init" + exit 1 + fi +} + +if [ $# -lt 4 ]; then + echo "$0 repo branch commit force {0|1}" + echo "Failure: ACA machine init" + exit 1 +fi + +echo "build_aca started on `uname`" + +GIT_REPO=$1 +GIT_BRANCH=$2 +GIT_COMMIT=$3 + +if [ $GIT_COMMIT = "HEAD" ]; then + GIT_COMMIT= +fi + +DO_FORCE=$4 + +if [ "$GIT_REPO" != "futurewei-cloud" -o "$BIT_BRANCH" != "master" -o -n "$GIT_COMMIT" ]; then + echo "Can't check status of remote repository other than" + echo "futurewei-cloud, forcing a build" + DO_FORCE=1 +fi + +GIT_URL="https://github.com/$GIT_REPO/alcor-control-agent" + +echo "build_aca using: +GIT_URL = $GIT_URL +GIT_BRANCH = $GIT_BRANCH +GIT_COMMIT = $GIT_COMMIT +FORCED_BUILD = $DO_FORCE +" + +git_check_current + +if [ $DO_FORCE -eq 1 ]; then + git_checkout + git_reset + git_fetch 
+fi + +git_check_remote + +if [ $DO_BUILD -eq 0 ]; then + echo "Skipping the build" + echo "Success: ACA machine init" + exit 0 +else + do_build +fi diff --git a/merak/testscripts/jinkins/container_ops.py b/merak/testscripts/jinkins/container_ops.py new file mode 100644 index 0000000..62e0d78 --- /dev/null +++ b/merak/testscripts/jinkins/container_ops.py @@ -0,0 +1,101 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +from helper_functions import * +from termcolor import colored + +def busybox_container_cleanup(aca_node_ip, con): + print("Cleaning up busybox container", con) + command = "sudo ovs-docker del-ports br-int {}".format(con) + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + command = "sudo docker container stop {}".format(con) + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + command = "sudo docker container rm {} -f ".format(con) + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + command = "sudo ovs-vsctl del-br br-tun" + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + command = "sudo ovs-vsctl del-br br-int" + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + command = "sudo /usr/local/share/openvswitch/scripts/ovs-ctl start" + output = run_command_on_host(aca_node_ip, command) + print("Cleanup task: ", command, "\n", output, "\n") + print("SUCCESS: busybox cleanup") + + +def busybox_container_deploy(target_ips, ip_mac_db, container_names): + index = 0; + size = len(container_names) + for db_ip, db_mac in ip_mac_db.items(): + con = container_names[index] + aca_ip = target_ips[index] + index = index + 1 + command1 = "sudo docker run -itd --name " + con + " --net=none busybox sh" + command2 = "sudo ovs-docker add-port br-int eth1 " + con + " --ipaddress=" + db_ip + "/24" + " --macaddress=" + db_mac + command3 = "sudo ovs-docker set-vlan br-int eth1 " + con + " 1" + print("Deploying busybox " + con + " on " + aca_ip) + gw = get_gateway_for_ip(db_ip) + command4 = "sudo docker exec -u root --privileged -it " + con + " route add default gw " + gw + output = run_command_on_host(aca_ip, command1) + print(con, "deploy task: ", output, "\n") + + output = run_command_on_host(aca_ip, 
command2) + print(con, "deploy task: ", output, "\n") + + output = run_command_on_host(aca_ip, command3) + print(con, "deploy task: ", output, "\n") + output = run_command_on_host(aca_ip, command4) + print(con, "add default gw : ", output, "\n") + print("SUCCESS: deploying busybox " + con + " on " + aca_ip) + + ip_addrs = list(ip_mac_db.keys()) + + +def run_ping_test(target_machines, ip_addrs, container_names): + index_0 = 0 + index_1 = 1 + ping_counts = 2 + + dest1 = ip_addrs[index_1] + dest2 = ip_addrs[index_0] + ping_0_to_1 = "sudo docker exec -it " + container_names[index_0] + " ping -c " + str(ping_counts) + " " + dest1 + ping_1_to_0 = "sudo docker exec -it " + container_names[index_1] + " ping -c " + str(ping_counts) + " " + dest2 + + HOST = target_machines[index_0] + print("Ping test on {} to {}".format(HOST, dest1)) + output1 = run_command_on_host(HOST, ping_0_to_1) + print("run_ping_test: {} to {} result: ".format(HOST, dest1, output1)) + + HOST = target_machines[index_1] + print("Ping test on {} to {}".format(HOST, dest2)) + output2 = run_command_on_host(HOST, ping_1_to_0) + print("run_ping_test: {} to {} result: ".format(HOST, dest2, output2)) + + expected_output = "2 packets transmitted, 2 packets received" + if expected_output in str(output1) and expected_output in str(output2): + print (colored("PING TEST SUCCESSFULL", 'green')) + return 0 + else: + print(colored('PING TEST FAILED', 'red')) + return 1 + diff --git a/merak/testscripts/jinkins/create_test_cases.py b/merak/testscripts/jinkins/create_test_cases.py new file mode 100644 index 0000000..c33bfe1 --- /dev/null +++ b/merak/testscripts/jinkins/create_test_cases.py @@ -0,0 +1,240 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+from create_test_setup import *
+
+# test_case is the scenario section header and must contain
+# all of the subnet information. Different scenarios may use
+# the same test_case setup, so to avoid confusion, pass in
+# the scenario name, which will appear in the test output.
+def create_subnets(snm_port, test_case, scenario): + print("creating subnets for scenario {}".format(scenario)) + subnet_info = read_config_file_section(test_case) + id_list = json.loads(subnet_info['subnet_ids']) + id_names = json.loads(subnet_info['subnet_names']) + device_ids = json.loads(subnet_info['device_ids']) + cidrs = json.loads(subnet_info['cidrs']) + ip_addrs = json.loads(subnet_info['ip_addrs']) + subnet_info = read_config_file_section("subnet_info") + subnetinfo = json.loads(subnet_info['subnet_info']) + url = 'http://localhost:{}/project/{}/subnets'.format(snm_port, get_projectid()) + subnet = {} + for cidr, id, name in zip(cidrs, id_list, id_names): + subnetinfo['cidr'] = cidr + subnetinfo['id'] = id + subnetinfo['name'] = name + subnet["subnet"] = subnetinfo + post_httprequest(url, subnet) + print("verifying created subnets") + print(get_httprequest(url)) + print("SUCCESS: creating subnets for {}".format(scenario)) + return id_list,device_ids,ip_addrs + + + +def create_security_groups(port): + print("Creating security groups") + security_groups ={} + url = 'http://localhost:{}/project/{}/security-groups'.format(port, get_projectid()) + sg_info = read_config_file_section("security_groups") + sginfo = json.loads(sg_info['security_group_info']) + + security_groups_info = read_config_file_section("L2_basic") + id_list = json.loads(security_groups_info['security_group_ids']) + id_names = json.loads(security_groups_info['sg_names']) + device_ids = json.loads(security_groups_info['device_ids']) + for name, id in zip(id_names, id_list): + sginfo['id'] = id + sginfo['name'] = name + security_groups["security_group"] = sginfo + print("SG ", security_groups) + post_httprequest(url, security_groups) + print("SUCCESS: creating security groups") + return id_list,device_ids + + +def attach_subnets_to_router(rm_port, snm_port, router_id, subnet_id_list): + url = 'http://localhost:{}/project/{}/routers/{}/add_router_interface'.format(rm_port, 
get_projectid(),router_id) + print("Attaching subnets to router") + + for id in subnet_id_list: + subnet_info = {"subnet_id":id} + put_httprequest(url, subnet_info) + req="http://localhost:{}/project/{}/routers".format(rm_port, get_projectid()) + print("Attached router info") + print(get_httprequest(req)) + print("SUCCESS: attaching subnets to router") + +# Test case 1: L2 Basic +# Two nodes in same subnet in different seurity groups +def prepare_test_L2_basic(ip_mac, ser_port): + test_name = "L2_basic" + print("Preparing test case {}...".format(test_name)) + serv = read_config_file_section("services") + create_default_segment_table(ser_port["sgs"]) + create_vpc(ser_port["vpm"]) + create_node(ser_port["nm"], ip_mac) + create_subnet(ser_port["snm"]) + id_list,device_ids = create_security_groups(ser_port["sgm"]) + change_ports = {"change":["security_groups","device_id"], "security_groups":id_list,"device_ids":device_ids} + create_port_goal_states(ser_port["pm"], change_ports) + ip_mac_db = get_mac_from_db() + print("Test case {}. 
IP/MAC in ignite DB: ".format(test_name, ip_mac_db)) + print("SUCCESS: preparing test case {}...".format(test_name)) + return ip_mac_db + +# Test case 2: L3_AttachRouter_then_CreatePorts (S4) +# Two nodes in different subnets, in same same sg +# Order of network element creation is: +# 1) Create default segement table +# 2) Create nodes +# 3) Create VPC +# 4) Create security group +# 5) Create create subnets +# 6) Create router +# 7) Attach subnets to router +# 8) Create ports +def prepare_test_L3_AttachRouter_then_CreatePorts(ip_mac, ser_port): + test_name = "L3_AttachRouter_then_CreatePorts" + print("Preparing test case {}...".format(test_name)) + serv = read_config_file_section("services") + create_default_segment_table(ser_port["sgs"]) + create_node(ser_port["nm"], ip_mac) + change = {'change':'cidr','cidr':"172.16.0.0/16"} + create_vpc(ser_port["vpm"], change) + create_security_group(ser_port["sgm"]) + + # create router + router_id =create_router_interface(ser_port["rm"]) + get_vpcs(ser_port["vpm"]) + + # create subnets + # Relevant subnet info from L3_AttachRouter_then_CreatePorts (S4) + id_list, device_ids, ip_addrs = create_subnets(ser_port["snm"], test_name, "S4") + + # attach subnets to router + attach_subnets_to_router(ser_port["rm"], ser_port["snm"], router_id, id_list) + get_subnets(ser_port["snm"]) + change_ports = {"change":["subnet_id","device_id","ip_addrs"],"subnet_id":id_list,"device_ids":device_ids,"ip_addrs":ip_addrs} + create_port_goal_states(ser_port["pm"], change_ports) + + ip_mac_db = get_mac_from_db() + print("Test {}. 
IP/MAC in ignite DB: ".format(test_name, ip_mac_db)) + print("SUCCESS: preparing test case {}...".format(test_name)) + return ip_mac_db + + +# test case 3: L3_CreatePorts_then_AttachRouter (S5) +# Two nodes in different subnets and same security group but +# Order of network element creation is: +# 1) Create default segement table +# 2) Create nodes +# 3) Create VPC +# 4) Create security group +# 5) Create create subnets +# 6) Create ports +# 7) Create router +# 8) Attach subnets to router +def prepare_test_L3_CreatePorts_then_AttachRouter(ip_mac, ser_port): + test_name = "L3_CreatePorts_then_AttachRouter" + print("Preparing test case {}...".format(test_name)) + serv = read_config_file_section("services") + create_default_segment_table(ser_port["sgs"]) + create_node(ser_port["nm"], ip_mac) + change = {'change':'cidr','cidr':"172.16.0.0/16"} + create_vpc(ser_port["vpm"], change) + get_vpcs(ser_port["vpm"]) + create_security_group(ser_port["sgm"]) + + # create subnets + # Relevant subnet info from L3_AttachRouter_then_CreatePorts (S4) + id_list, device_ids, ip_addrs = create_subnets(ser_port["snm"], "L3_AttachRouter_then_CreatePorts", "S5") + get_subnets(ser_port["snm"]) + + # create ports + change_ports = {"change":["subnet_id","device_id","ip_addrs"],"subnet_id":id_list,"device_ids":device_ids,"ip_addrs":ip_addrs} + create_port_goal_states(ser_port["pm"], change_ports) + + # create router + router_id = create_router_interface(ser_port["rm"]) + + # attach subnets to router + attach_subnets_to_router(ser_port["rm"], ser_port["snm"], router_id, id_list) + + ip_mac_db = get_mac_from_db() + print("Test case {}. 
IP/MAC in ignite DB: ".format(test_name, ip_mac_db)) + print("SUCCESS: preparing test case {}...".format(test_name)) + return ip_mac_db + +def create_port_goal_states(port, change_ports): + print("Creating goal state...") + url= 'http://localhost:{}/project/{}/ports'.format(port,get_projectid()) + port_info = read_config_file_section("port_info") + port_dict = port_info['port_info'] + port_dict = json.loads(port_dict, strict=False) + port_name = port_dict['name'] + port_id = port_dict['id'] + node_name = port_dict['binding:host_id'] + subnet_ids = [] + security_groups = [] + changes = change_ports['change'] + device_ids = [] + if 'subnet_id' in changes: + subnet_ids = change_ports['subnet_id'] + else: + # Adding the same subnet ID twice because it is going to be same for two ports we are creating + subnet_ids.append(port_dict['subnet_id']) + subnet_ids.append(port_dict['subnet_id']) + + + if 'device_id' in changes: + device_ids = change_ports['device_ids'] + else: + print("Adding same device id twice...") + # Adding the same device ID twice because it is going to be same for two ports we are creating + device_ids.append(port_dict['device_id']) + device_ids.append(port_dict['device_id']) + + + if 'security_groups' in changes: + security_groups = change_ports['security_groups'] + else: + # Adding the same security group ID twice because it is going to be same for two ports we are creating + security_groups.append(port_dict['security_groups']) + security_groups.append(port_dict['security_groups']) + + + if 'ip_addrs' in changes: + ip_addrs = change_ports['ip_addrs'] + else: + ip_addrs = port_dict['fixed_ips'] + + + for index in range(len(ip_addrs)): + ports = {} + port_info = {"admin_state_up":True, + "allowed_address_pairs": + [{"ip_address":"11.11.11.11", + "mac_address":"00-AA-BB-15-EB-3F"}], + "binding:host_id":node_name[index],"binding:vif_details":{},"create_at":"string","description": "string","device_id":device_ids[index],"device_owner": 
"compute:nova","dns_assignment": {},"dns_domain": "string","dns_name": "string","extra_dhcp_opts": [{"ip_version": "string","opt_name":"string","opt_value": "string"}],"fast_path": True,"fixed_ips":[{"ip_address": ip_addrs[index],"subnet_id":subnet_ids[index]}],"id": port_id[index],"mac_learning_enabled": True,"name": port_name[index],"network_id": port_dict['network_id'],"network_ns": "string","port_security_enabled": True,"project_id":port_dict['project_id'],"qos_network_policy_id": "string","qos_policy_id": "string","revision_number": 0,"security_groups": [security_groups[index]],"tags": ["string"],"tenant_id":port_dict['tenant_id'],"update_at": "string","uplink_status_propagation": True,"veth_name":"string"} + ports["port"] = port_info + print(ports, url) + print("Posting goal state") + post_httprequest(url, ports) + print("SUCCESS: creating goal state...") diff --git a/merak/testscripts/jinkins/create_test_setup.py b/merak/testscripts/jinkins/create_test_setup.py new file mode 100644 index 0000000..4315dc3 --- /dev/null +++ b/merak/testscripts/jinkins/create_test_setup.py @@ -0,0 +1,159 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import requests +import time +import json +from prepare_payload import * + +def create_default_segment_table(port): + print("Create default segment table") + url ='http://localhost:{}/segments/createDefaultTable'.format(port) + time.sleep(3) + post_httprequest(url) + print("SUCCESS: Created default segment table\n") + + +def create_node(port, ip_mac): + print("Creating nodes") + url= 'http://localhost:{}/nodes'.format(port) + data = {} + node_info = read_config_file_section("node_info") + node_dict = node_info['node_info'] + nodeinfo = json.loads(node_dict) + node_name = nodeinfo['node_name'] + node_id = nodeinfo['node_id'] + + for key, value in ip_mac.items(): + key_index = list(ip_mac).index(key) if key in ip_mac else None + node_info = {"local_ip":str(key), "mac_address":str(value), "node_id":node_id[key_index], "node_name":node_name[key_index], "server_port":nodeinfo['server_port'], "veth":nodeinfo['veth']} + data["host_info"] = node_info + post_httprequest(url, data) + print("SUCCESS: Created nodes\n") + + +def create_router_interface(port): + print("Creating router interface") + router={} + url = 'http://localhost:{}/project/{}/routers'.format(port, get_projectid()) + router_info = read_config_file_section("router_info") + router_dict = router_info['router_info'] + routerinfo = json.loads(router_dict) + route_info = {"admin_state_up": True,"availability_zone_hints": ["string"], "availability_zones": ["string"],"conntrack_helpers": ["string"],"description": "string","distributed": True,"external_gateway_info": {"enable_snat": True,"external_fixed_ips": [ ],"network_id": routerinfo['network_id']},"flavor_id": "string","gateway_ports": [ ], "ha": True,"id":routerinfo['id'] ,"name": 
routerinfo['name'],"owner": routerinfo['owner'], "project_id":routerinfo['project_id'],"revision_number": 0,"routetable": {},"service_type_id": "string","status": "BUILD","tags": ["string"],"tenant_id": routerinfo['tenant_id']} + router['router'] = route_info + post_httprequest(url, router) + print("SUCCESS: Created router interface\n") + return routerinfo['id'] + +# Second parameter is to indicate if the call is made for base test case or any other test case. +def create_vpc(port, change={}): + print("Creating VPC") + network = {} + url = 'http://localhost:{}/project/{}/vpcs'.format(port, get_projectid()) + network_info = read_config_file_section("vpc_info") + network_dict = network_info['vpc_info'] + networkinfo = json.loads(network_dict) + if('change' in change): + networkinfo[change['change']] = change[change['change']] + network_info = {"admin_state_up":True, "revision_number":0, "cidr":networkinfo['cidr'], "default":True, "description":"vpc", "dns_domain":"domain", "id":networkinfo['id'], "is_default":True, "mtu":1400, "name":"sample_vpc", "port_security_enabled":True, "project_id":networkinfo['project_id']} + print(network_info) + network["network"] = network_info + post_httprequest(url, network) + print("SUCCESS: Created VPC\n") + + +def create_subnet(port): + print("Creating Subnet") + subnet = {} + url = 'http://localhost:{}/project/{}/subnets'.format(port, get_projectid()) + subnet_info = read_config_file_section("subnet_info") + subnetinfo = json.loads(subnet_info['subnet_info']) + subnet["subnet"] = subnetinfo + print("Posting subnet", subnet) + post_httprequest(url, subnet) + print("SUCCESS: Creating Subnet\n") + + +def create_security_group(port): + print("Creating security group") + security_groups = {} + url = 'http://localhost:{}/project/{}/security-groups'.format(port, get_projectid()) + sg_info = read_config_file_section("security_groups") + sginfo = json.loads(sg_info['security_group_info']) + security_groups["security_group"]=sginfo + 
post_httprequest(url,security_groups) + print("SUCCESS: Created security group\n") + + +def get_subnets(port): + url = 'http://localhost:{}/project/{}/subnets'.format(port, get_projectid()) + print(get_httprequest(url)) + + +def get_nodes(port): + url= 'http://localhost:{}/nodes'.format(port) + print(get_httprequest(url)) + + +def get_vpcs(port): + url= 'http://localhost:{}/project/{}/vpcs'.format(port,get_projectid()) + print(get_httprequest(url)) + + +def get_ports(port): + url= 'http://localhost:{}/project/{}/ports'.format(port,get_projectid()) + print(get_httprequest(url)) + + +def create_ports(port): + print("Creating Goal State Ports") + url= 'http://localhost:{}/project/{}/ports'.format(port, get_projectid()) + port_info = read_config_file_section("port_info") + port_dict = port_info['port_info'] + port_dict = json.loads(port_dict, strict=False) + port_name = port_dict['name'] + port_id = port_dict['id'] + ip_addrs = port_dict['fixed_ips'] + node_name = port_dict['binding:host_id'] + device_id = port_dict['device_id'] + + for index in range(len(ip_addrs)): + ports = {} + port_info = {"admin_state_up":True,"allowed_address_pairs":[{"ip_address":"11.11.11.1","mac_address":"00-AA-BB-15-EB-3F"}],"binding:host_id":node_name[index],"binding:vif_details":{},"create_at":"string","description":"string","device_id":device_id[index],"device_owner":"compute:nova","dns_assignment":{},"dns_domain":"string","dns_name":"string","extra_dhcp_opts":[{"ip_version":"string","opt_name":"string","opt_value":"string"}],"fast_path": True,"fixed_ips":[{"ip_address":ip_addrs[index],"subnet_id":port_dict['subnet_id']}],"id":port_id[index],"mac_learning_enabled":True,"name":port_name[index],"network_id":port_dict['network_id'],"network_ns": 
"string","port_security_enabled":True,"project_id":port_dict['project_id'],"qos_network_policy_id":"string","qos_policy_id":"string","revision_number":0,"security_groups":[port_dict['security_groups']],"tags":["string"],"tenant_id":port_dict['tenant_id'],"update_at":"string","uplink_status_propagation":True,"veth_name":"string"} + ports["port"] = port_info + post_httprequest(url, ports) + + print("SUCCESS: Created Goal State Ports\n") + + +def create_test_setup(ip_mac, config_file_object): + print("Calling Alcor APIs to generate Goal States") + services = dict(config_file_object.items("services")) + service_port_map = get_service_port_map(services) + + create_default_segment_table(service_port_map["sgs"]) + create_vpc(service_port_map["vpm"]) + create_node(service_port_map["nm"], ip_mac) + create_subnet(service_port_map["snm"]) + create_security_group(service_port_map["sgm"]) + create_ports(service_port_map["pm"]) + + ip_mac_db = get_mac_from_db() + print("Goal State IP/MACs: ", ip_mac_db) + return ip_mac_db diff --git a/merak/testscripts/jinkins/helper_functions.py b/merak/testscripts/jinkins/helper_functions.py new file mode 100644 index 0000000..c7f15b8 --- /dev/null +++ b/merak/testscripts/jinkins/helper_functions.py @@ -0,0 +1,299 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import subprocess as sp +import sys, os, configparser +import json,time +from subprocess import * + +ALCOR_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) + +def make_docker_command(*argv): + command ='sudo docker ' + for arg in argv: + command += arg + return command + + +def get_file_list(mypath): + print(mypath) + onlyfiles = os.listdir(mypath) + return onlyfiles + +def get_aca_pid_and_kill(HOST,output): + print(output) + kill_status = False + pid=[] + if output: + for x in output.split(): + print("Pid is ",x) + if x !='None': + print(type(x),x,"x is not None inside") + COMMAND = 'sudo kill -9 {}'.format(int(x)) + output = run_command_on_host(HOST,COMMAND) + else: + print("None pid") + if not output: + kill_status = True + return kill_status + + +def kill_running_aca(HOST): + #COMMAND = "sudo ps aux | grep {}".format("AlcorControlAgent") + COMMAND = 'sudo pidof {}'.format("AlcorControlAgent") + output = run_command_on_host(HOST,COMMAND) + #print("OOO",output) + if get_aca_pid_and_kill(HOST,str(output)) == True: + return True + else: + return False + +def restart_alcor_agents(aca,path): + for HOST in aca.values(): + COMMAND = 'sudo {} -d'.format(path) + print(COMMAND) + ssh1 = sp.Popen(['ssh', + '-t','{}@{}'.format(get_username(), HOST), COMMAND],shell=False,stdout=sp.PIPE, + stderr=sp.PIPE, encoding='utf8') + + output1 = ssh1.poll() + print(aca,"Restart output1 ",output1) + time.sleep(2) + output2 = ssh1.poll() + print(aca,"Restart output2 ",output2) + if output1 == 255 or output2 == 
255: + return False + else: + return True + +# Check on a given HOST if a given process is running +# Return True/False accordingly +def check_process_running(HOST, process): + running = False + COMMAND = 'ps -ef | grep -I {}'.format(process) + output = run_command_on_host(HOST, COMMAND) + if(output): + for line in output.split('\n'): + line = line.strip() + if not 'grep' in line: + if(line): + running = True + return running + + +# Get the mac address of given host +def get_mac_id(HOST): + cmd = "ifconfig | grep -A 2 {} | tail -1".format(HOST) + output = run_command_on_host(HOST, cmd) + addr_string = str(output).split() + mac_addr = addr_string[addr_string.index('ether') + 1] + return mac_addr + +def get_username(): + return 'ubuntu' + + +# Function to run a given command on a given host +# Returns output on success, otherwise prints error code +def run_command_on_host(HOST, COMMAND): + try: + print("run_command_on_host: U = {}, H = {}, C = {}".format(get_username(), HOST, COMMAND)) + ssh1 = sp.Popen(['ssh', + '-o StrictHostKeyChecking=no', + '-o UserKnownHostsFile=/dev/null', + '-tt', + '{}@{}'.format(get_username(), HOST), COMMAND], + shell=False, + stdout=sp.PIPE, + stderr=sp.PIPE, + encoding='utf8') + result = ssh1.communicate() + print("Remote output",result) + retcode = ssh1.returncode + if "Segmentation fault" in str(result): + print("segmentation fault") + sys.exit(1) + print("Remote: ", retcode) + if retcode > 0: + print(result[1],retcode) + if 'Connection to' not in result[1] and 'closed' not in result[1]: + print("ERROR: ", result[1]) + sys.exit(1) + else: + #print("In else ",result[0]) + return result[0] + except Exception as e: + print(e) + print("Exception thrown when running command {} on HOST {}:".format(COMMAND, HOST), sys.exc_info()[0]) + sys.exit(1) + + +def print_output(output): + for line in output.decode(encoding='utf-8').split('\n'): + line = line.strip() + print(line) + + +# check if all alcor services are up and running. 
+# Success: all ports from 9001 through 9016 should show up in netstat output +# try for 5 minutes, waiting 10 seconds each time +# NOTE: Update the port list when new services are added. +# 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9015 9016 +def check_alcor_services(): + wait_limit = 300 + sleep_time = 10 + wait_time = 0 + try: + command = "netstat -ant | awk -F: '/90[0-9][0-9]/ {print $4}' | sed 's/[\t ]*$//' | sort -n | tr '[\n]' ' ' | sed 's/[\t ]*$//'" + iter = 1 + while wait_time < wait_limit: + pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) + res = pipe.communicate() + retcode = pipe.returncode + if retcode > 0: + print("Failed to execute command", repr(str(command))) + print_output(res[1]) + elif "9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9014 9015" in str(res): + print("SUCCESS for: ", command, "\n") + return True + elif "9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9015 9016" in str(res): + print("SUCCESS for: ", command, "\n") + return True + iter = iter + 1 + wait_time = wait_time + sleep_time + time.sleep(sleep_time) + + return False + except: + print("ERROR", sys.exc_info()[0]) + return False + + + +def execute_command(command): + try: + pipe = sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, shell=True) + res = pipe.communicate() + retcode = pipe.returncode + if retcode > 0: + print("Failed to execute command", repr(str(command))) + print_output(res[1]) + else: + print("SUCCESS for: ", command, "\n") + # print output of the command when debugging + # print_output(res[0]) + return retcode + except: + print("ERROR", sys.exc_info()[0]) + + +def execute_commands(cmd, command_list): + print("Executing commands in given list\n") + status = True + for command in command_list: + print(cmd, ": ", str(command)) + if(execute_command(command)): + print("Failed to ", cmd, command) + status = False + return status + + +def dict_clean(dict): + result = {} + for key, value in dict: + if(value 
== 'null'): + value = 'None' + elif(value == 'true'): + value = 'True' + elif(value == 'false'): + value = 'False' + result[key]=value + return(result) + + +# Return project id from config file under section 'test_setup' +def get_projectid(): + test_setup = read_config_file_section("test_setup") + proj = test_setup["project_id"] + return proj.replace('"', '') + + +# Return container 'ip_addrs' from config file under test_setup section +def get_container_ips(): + test_setup = read_config_file_section("test_setup") + return test_setup["ip_addrs"] + + +# Return the given section from the config file as a dictionary +def read_config_file_section(section): + config = configparser.ConfigParser() + config._interpolation = configparser.ExtendedInterpolation() + conf_file = "{}/alcor_services.ini".format(ALCOR_TEST_DIR) + config.read(conf_file) + serv = dict(config.items(section)) + return serv + + +def read_config_file(config_file): + config = configparser.ConfigParser() + config.read(config_file) + return config + + +def get_service_port_map(serv): + service_list = {} + for service_name in serv.keys(): + service_info = json.loads(serv[service_name]) + service_list[service_info["name"]] = service_info["port"] + return service_list + +def get_macaddr_alcor_agents(aca): + ip_mac ={} + for ip_addr in aca.values(): + mac_addr = get_mac_id(ip_addr) + print("Mac_addr", mac_addr, "for host", ip_addr, "\n") + ip_mac[ip_addr] = mac_addr + return ip_mac + +#This function checks the 'AlcorControlAgent' running on a host and returns its mac address +def check_alcor_agents_running(aca): + for ip_addr in aca.values(): + if(check_process_running(ip_addr.strip(), "AlcorControlAgent") == True): + print("AlcorControlAgent is running on {}".format(ip_addr)) + if(kill_running_aca(ip_addr)== True): + print("Running Alcor agent on {} has been killed Successfully".format(ip_addr)) + else: + print("Running Alcor agent on {} couldn't be killed".format(ip_addr)) + '''mac_addr = get_mac_id(ip_addr) 
+ print("Mac_addr", mac_addr, "for host", ip_addr, "\n") + ip_mac[ip_addr] = mac_addr''' + else: + print("AlcorControlAgent is not running on {}".format(ip_addr)) + +def get_gateway_for_ip(ip_addr): + gateways = read_config_file_section("gateways") + # print(gateways) + gateway_info = json.loads(gateways["gateway_info"]) + # print(gateway_info) + for gw in gateway_info: + for ip in gw["ips"]: + if ip == ip_addr: + # print("FOUND GW: IP = {}, GW = {}".format(ip_addr, gw["gw"])) + return gw["gw"] + return None diff --git a/merak/testscripts/jinkins/ping_test.py b/merak/testscripts/jinkins/ping_test.py new file mode 100644 index 0000000..46b0c25 --- /dev/null +++ b/merak/testscripts/jinkins/ping_test.py @@ -0,0 +1,220 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +import time, os +import argparse +import textwrap +import json +from helper_functions import * +from create_test_setup import * +from container_ops import * +from create_test_cases import * + +ACA_BIN_PATH = "./repos/aca/build/bin/AlcorControlAgent" +ALCOR_ROOT = os.path.abspath(os.path.join(__file__, "../../../")) +ALCOR_SERVICES = ALCOR_ROOT + "/services/" +ALCOR_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) +os.chdir("../../") + +# Builds the Ignite and all Alcor images as configured in +# alcor_services.ini file +def build_containers(services_dict): + container_list = [] + mvn_build = "mvn -Dmaven.test.skip=true -DskipTests clean package install" + container_list.append(mvn_build) + + print("building container images") + services_list = get_file_list(ALCOR_SERVICES) + # segment_service is bogus service, only used to pick up the + # internal port number of vpc_manager, which is 9001 + for service_name in services_dict.keys(): + if service_name == "segment_service": + continue + service_path = ALCOR_SERVICES + service_name + service_info = json.loads(services_dict[service_name]) + build_image = "sudo docker build" + " -t {} ".format(service_info["name"]) + if service_name == "ignite": + docker_file = "-f {} {}".format(ALCOR_ROOT + service_info["path"], ALCOR_ROOT + "/lib") + else: + docker_file = ALCOR_SERVICES + service_name + docker_build_cmd = build_image + docker_file + container_list.append(docker_build_cmd) + + if(execute_commands("Build ", container_list) == True): + print("All Alcor services built successfully") + return True + else: + print("All Alcor services could not be built successfully") + return False + + +def start_containers(serv): + start_containers = [] + for service_name in serv.keys(): + extra_args = "" + if service_name == "segment_service": + continue + service_info = json.loads(serv[service_name]) + run_container = "sudo docker run --name={} ".format(service_info["name"]) + mnt_and_image = "-v /tmp:/tmp -tid {} 
".format(service_info["name"]) + + if service_name == "ignite": + ports = "-p 10800:10800 -p 10801:10801 -p 47100:47100 -p 47500:47500 " + extra_args = "sh" + elif service_name == "vpc_manager": + # expose internal and external ports in VPM + vpm_info = json.loads(serv["segment_service"]) + ports = "--net=host -p {}:{} -p {}:{} ".format(service_info["port"], service_info["port"], vpm_info["port"], vpm_info["port"]) + else: + ports = "--net=host -p {}:{} ".format(service_info["port"], service_info["port"]) + + start_cmd = run_container + ports + mnt_and_image + if not extra_args: + start_cmd = start_cmd + " " + extra_args + + start_containers.append(start_cmd) + + if (True == execute_commands("Start ", start_containers)): + return True + else: + return False + + +def stop_containers(service_list): + command = "sudo docker container stop " + for service in service_list: + if service == "sgs": + continue + execute_command(command + service) + + +def remove_containers(service_list): + command = "sudo docker container rm " + for service in service_list: + if service == "sgs": + continue + execute_command(command + service) + + +def main(): + extra_wait_time = 120 + config_file = "{}/alcor_services.ini".format(ALCOR_TEST_DIR) + config_file_object = read_config_file(config_file) + services_dict = dict(config_file_object.items("services")) + service_port_map = get_service_port_map(services_dict) + parser = argparse.ArgumentParser(prog='ping_test', + formatter_class=argparse.RawDescriptionHelpFormatter, + description='Busybox ping test', + epilog=textwrap.dedent('''\ +Example of use: python script_name -b build +-t 1 : L2 Basic +-t 2 : L3_AttachRouter_then_CreatePorts (S4) +-t 3 : L3_CreatePorts_then_AttachRouter (S5) +''')) + parser.add_argument("-b", "--build", type=str, nargs='?', help=' to build alcor services provide :{} as an option'.format('-b build')) + parser.add_argument("-t", "--testcase", type=int, nargs='?', help='Test case number or {} for all tests cases. 
Default -t 1'.format('all'), default="1") + parser.add_argument("-s", "--all", type=str, nargs='?', help = 'all tests cases') + args = parser.parse_args() + print("PING TEST ARGS {}".format(args)) + + if args.build: + if(args.build == "build"): + if (build_containers(services_dict) == False): + sys.exit(1) + else: + print("To build before running the tests, use '-b build'") + print("ERROR: Quitting test\n") + sys.exit(1) + + stop_containers(service_port_map.keys()) + remove_containers(service_port_map.keys()) + if(start_containers(services_dict) == True): + print("All services started Sucessfully") + else: + print("ERROR: All Alcor services did NOT start successfully") + print("ERROR: Quitting test\n") + sys.exit(1) + + container_names_dict = dict(config_file_object.items("test_setup"))["container_names"] + container_names = json.loads(container_names_dict) + aca = dict(config_file_object.items("AlcorControlAgents")) + for aca_node,con in zip(aca.values(),container_names): + print("Busybox container cleanup...", aca_node, con) + busybox_container_cleanup(aca_node, con) + time.sleep(10) + + check_alcor_agents_running(aca) + time.sleep(30) + + if(restart_alcor_agents(aca, ACA_BIN_PATH) == False): + print("AlcorControlAgent did NOT start successfully") + print("Check the target nodes and run again") + print("ERROR: Quitting test\n") + sys.exit(1) + time.sleep(60) + + aca_nodes_ip_mac = get_macaddr_alcor_agents(aca) + print("ACA nodes IP MAC pair::", aca_nodes_ip_mac) + + if len(aca_nodes_ip_mac) != len(aca): + print("ERROR: Alcor Control Agent not running on some of the nodes") + print("ERROR: Quitting test\n") + sys.exit(1) + + print("Waiting for Alcor services to be up and running...\n") + if (check_alcor_services() == False): + print("ERROR: Alcor Services failed to start ...\n") + sys.exit(1) + + print("Alcor Services are up and running, but waiting for {} seconds more...\n".format(extra_wait_time)) + time.sleep(extra_wait_time) + + if args.testcase: + if 
(args.testcase == 1): + ip_mac_db = prepare_test_L2_basic(aca_nodes_ip_mac, service_port_map) + elif(args.testcase == 2): + ip_mac_db = prepare_test_L3_AttachRouter_then_CreatePorts(aca_nodes_ip_mac, service_port_map) + elif (args.testcase == 3): + ip_mac_db = prepare_test_L3_CreatePorts_then_AttachRouter(aca_nodes_ip_mac, service_port_map) + else: + print("Invoke {}".format('-t ')) + print("ERROR: Quitting test\n") + sys.exit(1) + else: + ip_mac_db = create_test_setup(aca_nodes_ip_mac, config_file_object) + + aca_node_ips = list(aca_nodes_ip_mac.keys()) + goal_state_ips = list(ip_mac_db.keys()) + print("Deploying containers on target nodes") + print("ACA nodes: ", aca_node_ips) + print("Goal states: ", ip_mac_db) + print("Container names: ", container_names) + busybox_container_deploy(aca_node_ips, ip_mac_db, container_names) + status = run_ping_test(aca_node_ips, goal_state_ips, container_names) + if status != 0: + print("ERROR: Quitting test\n") + sys.exit(1) + return True + + +if __name__ == "__main__": + if (main() == False): + sys.exit(1) + else: + sys.exit(0) diff --git a/merak/testscripts/jinkins/prepare_payload.py b/merak/testscripts/jinkins/prepare_payload.py new file mode 100644 index 0000000..92f2ce2 --- /dev/null +++ b/merak/testscripts/jinkins/prepare_payload.py @@ -0,0 +1,97 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. 
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import requests +import time +import json +from helper_functions import * + +ip_mac_db = {} + +def put_httprequest(url, data=""): + try: + headers = { + 'Content-Type': 'application/json', + 'Accept': '*/*', + } + print("PUTing http request") + print(url, data) + response = requests.put(url, data=json.dumps(data), headers=headers) + if(response.ok): + print("PUT Success", url) + else: + response.raise_for_status() + except requests.exceptions.HTTPError as err: + print("PUT Failed for {} with error".format(url, response.text)) + print(response.json) + print("ERROR",err) + raise SystemExit(err) + + +def post_httprequest(url, data=""): + try: + headers = { + 'Content-Type': 'application/json', + 'Accept': '*/*', + } + print("POSTing http request") + print(url, data) + response = requests.post(url, data=json.dumps(data), headers=headers) + if(response.ok): + print("POST Success", url) + if 'ports' in url: + valid_response = json.loads(response.text, object_pairs_hook = dict_clean) + get_mac_for_ips(valid_response) + print("POST RESPONSE: {}".format(valid_response)) + else: + response.raise_for_status() + except requests.exceptions.HTTPError as err: + print("POST Failed for {} with error".format(url, response.text)) + print(response.json) + print("ERROR",err) + raise SystemExit(err) + + +def get_mac_for_ips(valid_response): + print("in prepare_payload ", valid_response) + ports_info = valid_response["port"] + key = ports_info["fixed_ips"][0]["ip_address"] + value = ports_info["mac_address"] + 
ip_mac_db[key] = value + print("IP_MAC_DB = ", ip_mac_db) + + +def get_httprequest(url): + try: + response = requests.get(url) + if(response.ok): + print("GET Success", url) + return response.text + else: + response.raise_for_status() + except requests.HTTPError as exception: + print("GET failed for url", url) + raise SystemExit(exception) + + +def get_mac_from_db(): + print("\n\n\n>>>>>>>") + print("IP & MAC stored in ignite db", ip_mac_db) + return ip_mac_db + diff --git a/merak/testscripts/jinkins/remote_aca_build.sh b/merak/testscripts/jinkins/remote_aca_build.sh new file mode 100644 index 0000000..d1e2f03 --- /dev/null +++ b/merak/testscripts/jinkins/remote_aca_build.sh @@ -0,0 +1,86 @@ +#! /bin/sh + +# Build ACA on ACA nodes from alcor_services.ini +# Triggers a remote build on ACA nodes listed in alcor_services.ini. +# + +ALCOR_INI=alcor_services.ini +SCRIPT_DIR=`dirname $0` +SCRIPT_DIR=`realpath $SCRIPT_DIR` + +ACA_DIR="repos/aca" +ACA_NODES="" + +ACA_REPO="futurewei-cloud" +ACA_BRANCH="master" +ACA_COMMIT="HEAD" +USER_REPO=0 +USER_BRANCH=0 + +get_aca_node_info() { + ACA_NODES=`sed -n '/^\[AlcorControlAgents\]/,/^\[/p' ${ALCOR_INI} | grep -v '^\[' | grep -v '^[\t ]*$' | awk -F= '{print $2}' | tr -d '[\t ]'` +} + +if [ -d "${SCRIPT_DIR}" -a -s ${SCRIPT_DIR}/${ALCOR_INI} ]; then + true +else + echo "Missing ${ALCOR_INI} file, can only run in alcor/scripts/busybox-ping-test" + exit 1 +fi + +while getopts "r:b:c:f" opt; do + case "$opt" in + r) ACA_REPO=$OPTARG + USER_REPO=1 + ;; + + b) ACA_BRANCH=$OPTARG + USER_BRANCH=1 + ;; + + c) ACA_COMMIT=$OPTARG + ;; + + f) DO_FORCE=1 + ;; + ?) + echo "Usage: $0 [-r repo] [-b branch] [-c commit] [-f] +-r repo : repository, if using a personal fork. +-b branch : branch, if using a personal branch. +-c commit : commit, if not using the HEAD. +-f : to force a build even if there are no changes in remote. 
+" + exit 1 + ;; + esac +done + +get_aca_node_info +NC=0 +for node in `echo ${ACA_NODES}`; do + rm -f /tmp/aca_${node}_build.log > /dev/null 2>&1 + NC=`expr $NC + 1` + scp ./build_aca.sh ubuntu@${node}:${ACA_DIR}/ + ssh ubuntu@$node "cd $ACA_DIR && sudo ./build_aca.sh $ACA_REPO $ACA_BRANCH $ACA_COMMIT $DO_FORCE" > /tmp/aca_${node}_build.log 2>&1 & + echo "Scheduled build on $node" +done + +# check the status +SC=0 +for node in `echo ${ACA_NODES}`; do + while :; do + if fgrep 'Success: ACA machine init' /tmp/aca_${node}_build.log > /dev/null 2>&1; then + SC=`expr $SC + 1` + break + elif fgrep 'Failure: ACA machine init' /tmp/aca_${node}_build.log > /dev/null 2>&1; then + echo "ACA Build failed on $node" + exit 1 + else + sleep 60 + fi + done + if [ $SC -eq $NC ]; then + break + fi +done +exit 0 diff --git a/merak/testscripts/k6/K6_test_prepare.py b/merak/testscripts/k6/K6_test_prepare.py new file mode 100755 index 0000000..f67095b --- /dev/null +++ b/merak/testscripts/k6/K6_test_prepare.py @@ -0,0 +1,226 @@ +#!/usr/bin/python3 + +# MIT License +# Copyright(c) 2020 Futurewei Cloud +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import time, os +import argparse +import textwrap +import json +from helper_functions import * +from create_test_setup import * +from container_ops import * +from create_test_cases import * + +ACA_BIN_PATH = "/home/sdn/alcor-control-agent/build/bin/AlcorControlAgent" +ALCOR_ROOT = os.path.abspath(os.path.join(__file__, "../../../")) +ALCOR_SERVICES = ALCOR_ROOT + "/services/" +ALCOR_TEST_DIR = os.path.dirname(os.path.abspath(__file__)) +os.chdir("../../") + +# Builds the Ignite and all Alcor images as configured in +# alcor_services.ini file +def build_containers(services_dict): + container_list = [] + mvn_build = "mvn -Dmaven.test.skip=true -DskipTests clean package install" + container_list.append(mvn_build) + + print("building container images") + services_list = get_file_list(ALCOR_SERVICES) + # segment_service is bogus service, only used to pick up the + # internal port number of vpc_manager, which is 9001 + for service_name in services_dict.keys(): + if service_name == "segment_service": + continue + service_path = ALCOR_SERVICES + service_name + service_info = json.loads(services_dict[service_name]) + build_image = "sudo docker build" + " -t {} ".format(service_info["name"]) + if service_name == "ignite": + docker_file = "-f {} {}".format(ALCOR_ROOT + service_info["path"], ALCOR_ROOT + "/lib") + else: + docker_file = ALCOR_SERVICES + service_name + docker_build_cmd = build_image + docker_file + container_list.append(docker_build_cmd) + + if(execute_commands("Build ", container_list) == True): + print("All Alcor services built successfully") + return True + else: + print("All Alcor services could not be built successfully") + return False + + +def start_containers(serv): + start_containers = 
[] + for service_name in serv.keys(): + extra_args = "" + if service_name == "segment_service": + continue + service_info = json.loads(serv[service_name]) + run_container = "sudo docker run --name={} ".format(service_info["name"]) + mnt_and_image = "-v /tmp:/tmp -tid {} ".format(service_info["name"]) + + if service_name == "ignite": + ports = "-p 10800:10800 -p 10801:10801 -p 47100:47100 -p 47500:47500 " + extra_args = "sh" + elif service_name == "vpc_manager": + # expose internal and external ports in VPM + vpm_info = json.loads(serv["segment_service"]) + ports = "--net=host -p {}:{} -p {}:{} ".format(service_info["port"], service_info["port"], vpm_info["port"], vpm_info["port"]) + else: + ports = "--net=host -p {}:{} ".format(service_info["port"], service_info["port"]) + + start_cmd = run_container + ports + mnt_and_image + if not extra_args: + start_cmd = start_cmd + " " + extra_args + + start_containers.append(start_cmd) + + if (True == execute_commands("Start ", start_containers)): + return True + else: + return False + + +def stop_containers(service_list): + command = "sudo docker container stop " + for service in service_list: + if service == "sgs": + continue + execute_command(command + service) + + +def remove_containers(service_list): + command = "sudo docker container rm " + for service in service_list: + if service == "sgs": + continue + execute_command(command + service) + + +def main(): + extra_wait_time = 120 + config_file = "{}/alcor_services.ini".format(ALCOR_TEST_DIR) + config_file_object = read_config_file(config_file) + services_dict = dict(config_file_object.items("services")) + service_port_map = get_service_port_map(services_dict) + print("service_port_map",service_port_map) +# parser = argparse.ArgumentParser(prog='ping_test', +# formatter_class=argparse.RawDescriptionHelpFormatter, +# description='Busybox ping test', +# epilog=textwrap.dedent('''\ +# #Example of use: python script_name -b build +# #-t 1 : L2 Basic +# #-t 2 : 
L3_AttachRouter_then_CreatePorts (S4) +# #-t 3 : L3_CreatePorts_then_AttachRouter (S5) +# #''')) +# parser.add_argument("-b", "--build", type=str, nargs='?', help=' to build alcor services provide :{} as an option'.format('-b build')) +# parser.add_argument("-t", "--testcase", type=int, nargs='?', help='Test case number or {} for all tests cases. Default -t 1'.format('all'), default="1") +# parser.add_argument("-s", "--all", type=str, nargs='?', help = 'all tests cases') +# args = parser.parse_args() +# print("PING TEST ARGS {}".format(args)) + +# if args.build: +# if(args.build == "build"): +# if (build_containers(services_dict) == False): +# sys.exit(1) +# else: +# print("To build before running the tests, use '-b build'") +# print("ERROR: Quitting test\n") +# sys.exit(1) + + + stop_containers(service_port_map.keys()) + remove_containers(service_port_map.keys()) + if(start_containers(services_dict) == True): + print("All services started Sucessfully") + else: + print("ERROR: All Alcor services did NOT start successfully") + print("ERROR: Quitting test\n") + sys.exit(1) + + + + container_names_dict = dict(config_file_object.items("test_setup"))["container_names"] + print("container_names_dict",container_names_dict) + container_names = json.loads(container_names_dict) + print("container_names",container_names) + aca = dict(config_file_object.items("AlcorControlAgents")) + for aca_node,con in zip(aca.values(),container_names): + print("Busybox container cleanup...", aca_node, con) + busybox_container_cleanup(aca_node, con) + time.sleep(10) + + check_alcor_agents_running(aca) + time.sleep(30) + + if(restart_alcor_agents(aca, ACA_BIN_PATH) == False): + print("AlcorControlAgent did NOT start successfully") + print("Check the target nodes and run again") + print("ERROR: Quitting test\n") + sys.exit(1) + time.sleep(60) + + aca_nodes_ip_mac = get_macaddr_alcor_agents(aca) + print("ACA nodes IP MAC pair::", aca_nodes_ip_mac) + + if len(aca_nodes_ip_mac) != len(aca): + 
print("ERROR: Alcor Control Agent not running on some of the nodes") + print("ERROR: Quitting test\n") + sys.exit(1) + + print("Waiting for Alcor services to be up and running...\n") + if (check_alcor_services() == False): + print("ERROR: Alcor Services failed to start ...\n") + sys.exit(1) + + print("Alcor Services are up and running, but waiting for {} seconds more...\n".format(extra_wait_time)) + time.sleep(extra_wait_time) + + # if args.testcase: + # if (args.testcase == 1): + # ip_mac_db = prepare_test_L2_basic(aca_nodes_ip_mac, service_port_map) + # elif(args.testcase == 2): + # ip_mac_db = prepare_test_L3_AttachRouter_then_CreatePorts(aca_nodes_ip_mac, service_port_map) + # elif (args.testcase == 3): + # ip_mac_db = prepare_test_L3_CreatePorts_then_AttachRouter(aca_nodes_ip_mac, service_port_map) + # else: + # print("Invoke {}".format('-t ')) + # print("ERROR: Quitting test\n") + # sys.exit(1) + # else: + # ip_mac_db = create_test_setup(aca_nodes_ip_mac, config_file_object) + + # aca_node_ips = list(aca_nodes_ip_mac.keys()) + # goal_state_ips = list(ip_mac_db.keys()) + # print("Deploying containers on target nodes") + # print("ACA nodes: ", aca_node_ips) + # print("Goal states: ", ip_mac_db) + # print("Container names: ", container_names) + # busybox_container_deploy(aca_node_ips, ip_mac_db, container_names) + # status = run_ping_test(aca_node_ips, goal_state_ips, container_names) + # if status != 0: + # print("ERROR: Quitting test\n") + # sys.exit(1) + # return True + + +if __name__ == "__main__": + if (main() == False): + sys.exit(1) + else: + sys.exit(0) diff --git a/merak/testscripts/k6/alcor_http_api_test.js b/merak/testscripts/k6/alcor_http_api_test.js new file mode 100644 index 0000000..4703866 --- /dev/null +++ b/merak/testscripts/k6/alcor_http_api_test.js @@ -0,0 +1,381 @@ +/* +MIT License +Copyright(c) 2020 Futurewei Cloud + Permission is hereby granted, + free of charge, to any person obtaining a copy of this software and associated documentation 
files(the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons + to whom the Software is furnished to do so, subject to the following conditions: + The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +*/ + +/* + This is the code testing the Alcor HTTP APIs, currently support APIs: + 1. createVPC + 2. createSubnet + 3. createPort + + Params: + 1. vpm_ip + 2. vpm_port + 3. snm_ip + 4. snm_port + 5. pm_ip + 6. pm_port + 7. vpc_cidr_slash + 8. tenant_amount + 9. project_amount_per_tenant + 10. vpc_amount_per_project + 11. subnet_amount_per_vpc + 12. test_vpc_api = true + 13. test_subnet_api = true + 14. test_port_api = true + 15. call_api_rate = 100 + + the number of ports will be based on the vpc_cidr_slash and subnet_amount_per_vpc, for example, if vpc_cidr_slash + is 8, then the network cidr becomes 10.0.0.0/8, which has 2^(32-8) IPs, and say we have subnet_amount_per_vpc = 1024, + which is 2^10, then each subnet will have 2^(32-8-10) = 16384 ports, minus the two IPs(first and last in subnet cidr) + reserved by Alcor. 
+*/ + +import { sleep } from "k6"; +import { + get_alcor_services_info, + get_service_port_map, +} from "./alcor_services.js"; +import { + put_httprequest, + post_httprequest, + get_httprequest, + get_mac_from_db, +} from "./prepare_payload.js"; +import * as helper_functions from "./helper_functions.js"; +import { FormElement } from "k6/html"; + +var vpm_ip = get_alcor_services_info("TC", "vpm_ip"); +var vpm_port = get_alcor_services_info("TC", "vpm_ip"); +var snm_ip = get_alcor_services_info("TC", "snm_ip"); +var snm_port = get_alcor_services_info("TC", "snm_port"); +var pm_ip = get_alcor_services_info("TC", "pm_ip"); +var pm_port = get_alcor_services_info("TC", "pm_port"); + +/* + vpc_cidr_slash, the number after the slash in the vpc CIDR, + decides how big the VPC is, such as 10.0.0.0/16 or 10.0.0.0/8. +*/ +var vpc_cidr_slash = get_alcor_services_info("TC", "vpc_cidr_slash"); + +/* + tenant_amount = concurrency when calling APIs. +*/ +var tenant_amount = get_alcor_services_info("TC", "tenant_amount"); + +/* + project_amount_per_tenant, each tenant can have multiple projects. +*/ +var project_amount_per_tenant = get_alcor_services_info( + "TC", + "project_amount_per_tenant" +); + +/* + vpc_amount_per_project, each project can have multiple VPCs. + each VPC can have the same CIDR +*/ +var vpc_amount_per_project = get_alcor_services_info( + "TC", + "vpc_amount_per_project" +); + +/* + subnet_amount_per_vpc, each VPC can have multiple subnets. 
+*/ +var subnet_amount_per_vpc = get_alcor_services_info( + "TC", + "subnet_amount_per_vpc" +); + +var test_vpc_api = get_alcor_services_info("TC", "test_vpc_api"); +var test_subnet_api = get_alcor_services_info("TC", "test_subnet_api"); +var test_port_api = get_alcor_services_info("TC", "test_port_api"); +var call_api_rate = get_alcor_services_info("TC", "call_api_rate"); + +export function run_test_against_alcor_apis() { + console.log( + "Beginning of alcor API test, need to generate: " + + tenant_amount + + " tenants, \n" + + project_amount_per_tenant + + " projects for each tenant, \n" + + vpc_amount_per_project + + " VPCs for each project, \n" + + subnet_amount_per_vpc + + " subnets for each VPC, \n" + ); + + let tenant_uuids = []; + let tenant_projects = {}; + let project_vpcs = {}; + let vpc_subnets = {}; + let subnet_ports = {}; + let vpc_port_ips = []; + let subnet_port_amount = 1; + + for (let i = 0; i < tenant_amount; i++) { + let current_tenant_uuid = helper_functions.uuid(); + tenant_uuids.push(current_tenant_uuid); + let current_tenant_projects = []; + for (let j = 0; j < project_amount_per_tenant; j++) { + let current_project_id = helper_functions.uuid(); + current_tenant_projects.push(current_project_id); + let vpcs_inside_a_project = []; + for (let k = 0; k < vpc_amount_per_project; k++) { + /* + If you set it to /8 you will get a out-of-memory error. + /12 gives you more than 2 ^ 20 ports in a VPC, which is + 1,048,576, without causing the out-of-memory error. 
+ */ + let vpc_cidr = "10.0.0.0/" + String(vpc_cidr_slash); + let current_vpc_id = helper_functions.uuid(); + let network = { + admin_state_up: true, + revision_number: 0, + cidr: vpc_cidr, + default: true, + description: "vpc-" + String(k), + dns_domain: "test-dns-domain", + id: current_vpc_id, + is_default: true, + mtu: 1400, + name: "vpc-" + String(k), + port_security_enabled: true, + project_id: current_project_id, + }; + let vpc_payload = { network: network }; + vpcs_inside_a_project.push(vpc_payload); + + /* + 1. Generate all port IPs from VPC CIDR range. + 2. Divide port IPs into groups based on subnet_amount_per_vpc; + 3. Each group is a subnet, calculate subnet CIDR and form its subnet payload and ports payload + */ + if (vpc_port_ips.length == 0) { + try { + console.log("Need to generate port IPs for the first time."); + vpc_port_ips = get_cidr_network_address_range(vpc_cidr); + subnet_port_amount = vpc_port_ips.length / subnet_amount_per_vpc; + console.log( + "Finished generating port IPs. 
Each subnet should have " + + String(subnet_port_amount) + + " ports" + ); + } catch (err) { + console.log(JSON.stringify(err)); + } + } + /* + Create subnet payload based on vpc payload + */ + if (test_subnet_api && vpc_port_ips.length != 0) { + let current_vpc_subnets = []; + console.log("Generating subnets"); + + for (let l = 0; l < subnet_amount_per_vpc; l++) { + // let subnet_start_ip = vpc_port_ips[(l * subnet_port_amount) + 0] + // let subnet_end_ip = vpc_port_ips[(l * subnet_port_amount) + subnet_port_amount - 1] + let subnet_range = vpc_port_ips.slice( + l * subnet_port_amount + 0, + l * subnet_port_amount + subnet_port_amount + ); + let subnet_cidr = subnet_range[0]; + let current_subnet_id = helper_functions.uuid(); + let subnet = { + cidr: subnet_cidr, + id: current_subnet_id, + ip_version: 4, + network_id: current_vpc_id, + name: "subnet" + l, + }; + let subnet_payload = { subnet: subnet }; + current_vpc_subnets.push(subnet_payload); + + if (test_port_api) { + let subnet_port_ips = vpc_port_ips.slice( + l * subnet_port_amount + 0, + l * subnet_port_amount + subnet_port_amount + ); + let current_subnet_ports = []; + for (let port_ip_in_subnet in subnet_port_ips) { + let subnet_fixed_ip = { + ip_address: port_ip_in_subnet, + subnet_id: current_subnet_id, + }; + let fixed_ips = [subnet_fixed_ip]; + + let port = { + admin_state_up: true, + description: "test_port", + device_id: "test_device_id", + device_owner: "compute:nova", + fast_path: true, + fixed_ips: fixed_ips, + id: helper_functions.uuid(), + mac_learning_enabled: true, + network_id: current_vpc_id, + securi_enabled: true, + project_id: current_project_id, + revision_number: 0, + tenant_id: current_tenant_uuid, + uplink_status_propagation: true, + }; + let port_payload = { port: port }; + current_subnet_ports.push(port_payload); + } + subnet_ports[current_subnet_id] = current_subnet_ports; + } + } + console.log("Finished generating subnets for vpc."); + vpc_subnets[current_vpc_id] = 
current_vpc_subnets; + } + } + project_vpcs[current_project_id] = vpcs_inside_a_project; + } + tenant_projects[current_tenant_uuid] = current_tenant_projects; + } + + if (test_vpc_api) { + console.log("Time to test VPC API!"); + // let create_vpc_jobs = helper_functions.cloneObj(project_vpcs) + let create_vpc_jobs = []; + for (let project_id in project_vpcs) { + for (let project_id_id in project_vpcs[project_id]) { + create_vpc_jobs.push(project_vpcs[project_id][project_id_id]); + } + } + // console.log(JSON.stringify(create_vpc_jobs)) + + for (let vpc_job in create_vpc_jobs) { + // console.log(JSON.stringify(create_vpc_jobs[vpc_job])) + let current_project_id = + create_vpc_jobs[vpc_job]["network"]["project_id"]; + // console.log("==========") + let create_vpc_url = + "http://" + + vpm_ip + + ":" + + vpm_port + + "/project/" + + current_project_id + + "/vpcs"; + let create_vpc_response = post_httprequest(create_vpc_url, vpc_job); + if ( + null != create_vpc_response && + create_vpc_response.hasOwnProperty("network") + ) { + console.log("Created VPC successfully"); + } + } + if (test_subnet_api || test_port_api) { + /* + If we are testing subnet API or port API, we need to wait until the VPC is created. + */ + sleep(600); + } else { + /* we actually don't need to wait latch_wait_seconds + because if we start the wait after the last call, we should actually wait for the last call. + So we will be waiting only 1 second at most. 
+ */ + sleep(1); + } + } + + if (test_subnet_api) { + console.log("Time to test subnet API!"); + // let create_subnet_jobs = helper_functions.cloneObj(vpc_subnets) + let create_subnet_jobs = []; + // console.log("=============") + // console.log(JSON.stringify(vpc_subnets)) + for (let vpc_id in vpc_subnets) { + for (let vpc_id_id in vpc_subnets[vpc_id]) { + create_subnet_jobs.push(vpc_subnets[vpc_id][vpc_id_id]); + } + } + + for (let subnet_job in create_subnet_jobs) { + let create_subnet_url = + "http://" + + snm_ip + + ":" + + snm_port + + "/project/" + + current_project_id + + "/subnets"; + let create_vpc_response = post_httprequest(create_subnet_url, subnet_job); + if ( + null != create_vpc_response && + create_vpc_response.hasOwnProperty("subnet") + ) { + console.log("Created VPC successfully"); + } + } + + if (test_port_api) { + /* + If we are testing port API, we need to wait until the VPC is created. + */ + sleep(600); + } else { + /* we actually don't need to wait latch_wait_seconds + because if we start the wait after the last call, we should actually wait for the last call. + So we will be waiting only 1 second at most. 
+ */ + sleep(1); + } + } + + if (test_port_api) { + console.log("Time to test port API!"); + // let create_port_jobs = helper_functions.cloneObj(subnet_ports); + // for (let subnet_id in create_port_jobs) { + // current_subnet_ports[subnet_id].pop(); + // current_subnet_ports[subnet_id].shift(); + // } + + let create_port_jobs = [] + for (let subnet_id in subnet_ports) { + let current_subnet_ports = subnet_ports[subnet_id] + current_subnet_ports.pop() + current_subnet_ports.shift() + for (let subnet_id_id in current_subnet_ports) { + create_port_jobs.push(current_subnet_ports[subnet_id_id]) + } + } + + for (let port_job in create_port_jobs) { + let current_project_id = port_job["port"]["project_id"]; + let create_port_url = + "http://" + + pm_ip + + ":" + + pm_port + + "/project/" + + current_project_id + + "/ports"; + let create_vpc_response = post_httprequest(create_port_url, port_job); + if ( + null != create_vpc_response && + create_vpc_response.hasOwnProperty("port") + ) { + console.log("Created VPC successfully"); + } + } + sleep(1); + } +} + +// K6 test function +export default function() { + let res = run_test_against_alcor_apis(); +} \ No newline at end of file diff --git a/merak/testscripts/k6/alcor_services.js b/merak/testscripts/k6/alcor_services.js new file mode 100644 index 0000000..30f4945 --- /dev/null +++ b/merak/testscripts/k6/alcor_services.js @@ -0,0 +1,250 @@ +var alcor_services = {} + +// [services] +alcor_services["services"] = {} +alcor_services["services"]["ignite"] = { + "name": "ignite", + "port": 10800, + "path": "/lib/ignite.Dockerfile" +} +// vpc_manager internal port : 9001 +alcor_services["services"]["vpc_manager"] = { "name": "vpm", "port": 9009 } + +// Segment handling can't be routed through API gateway but has to go +// directly to vpc_manager but other vpc related requests will have to +// go through API gateway. A segment_service is added as an alias of +// vpc_manager internal port for handling this situation. 
At present, ping +// test uses it to create default segment table. +alcor_services["services"]["segment_service"] = { "name": "sgs", "port": 9001 } + +// subnet_manager internal port : 9002 +alcor_services["services"]["subnet_manager"] = { "name": "snm", "port": 9009 } + +// route_manager internal port : 9003 +alcor_services["services"]["route_manager"] = { "name": "rm", "port": 9009 } + +// private_ip_manager internal port : 9004 +alcor_services["services"]["private_ip_manager"] = { "name": "pim", "port": 9009 } + +// mac_manager (virtual mac manager) internal port : 9005 +alcor_services["services"]["mac_manager"] = { "name": "mm", "port": 9009 } + +// port_manager internal port : 9006 +alcor_services["services"]["port_manager"] = { "name": "pm", "port": 9009 } + +// This can't be routed through API GW +alcor_services["services"]["node_manager"] = { "name": "nm", "port": 9007 } + +// security_group_manager internal port : 9008 +alcor_services["services"]["security_group_manager"] = { "name": "sgm", "port": 9009 } + +alcor_services["services"]["api_gateway"] = { "name": "ag", "port": 9009 } + +// data_plane_manager internal port : 9010 +alcor_services["services"]["data_plane_manager"] = { "name": "dpm", "port": 9009 } + +// elastic_ip_manager internal port : 9011 +alcor_services["services"]["elastic_ip_manager"] = { "name": "eim", "port": 9009 } + +// quota_manager internal port : 9012 +alcor_services["services"]["quota_manager"] = { "name": "qm", "port": 9009 } + +// network_acl_manager internal port : 9013 +alcor_services["services"]["network_acl_manager"] = { "name": "nam", "port": 9009 } + +// network_config_manager internal port : 9014 +alcor_services["services"]["network_config_manager"] = { "name": "ncm", "port": 9009 } + +// gateway_manager internal port : 9015 +alcor_services["services"]["gateway_manager"] = { "name": "gm", "port": 9009 } + +// [AlcorControlAgents] +alcor_services["AlcorControlAgents"] = {} +alcor_services["AlcorControlAgents"]["node1"] = 
"172.16.62.212" +alcor_services["AlcorControlAgents"]["node2"] = "172.16.62.213" + +// [test_setup] +alcor_services["test_setup"] = {} +alcor_services["test_setup"]["vpc_id"] = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001" +alcor_services["test_setup"]["project_id"] = "3dda2801-d675-4688-a63f-dcda8d327f50" +alcor_services["test_setup"]["tenant_id"] = "3dda2801-d675-4688-a63f-dcda8d327f50" +alcor_services["test_setup"]["network_id"] = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001" +alcor_services["test_setup"]["cidr"] = "172.16.0.0/16" +alcor_services["test_setup"]["node_id"] = [ + "1112a4d4-ffff-4ece-b3f0-8d36e3d85001", + "1112a4d4-ffff-4ece-b3f0-8d36e3d85002" +] +alcor_services["test_setup"]["node_name"] = ["node1", "node2"] +alcor_services["test_setup"]["subnet_id"] = "8182a4d4-ffff-4ece-b3f0-8d36e3d88001" +alcor_services["test_setup"]["security_group_id"] = "3dda2801-d675-4688-a63f-dcda8d111111" +alcor_services["test_setup"]["device_id"] = [ + "8182a4d4-ffff-4ece-b3f0-8d36e3d00001", + "8182a4d4-ffff-4ece-b3f0-8d36e3d00002" +] +alcor_services["test_setup"]["port_name"] = ["port101", "port102"] +alcor_services["test_setup"]["port_id"] = [ + "7122a4d4-ffff-5eee-b3f0-8d36e3d01101", + "7122a4d4-ffff-5eee-b3f0-8d36e3d02201" +] +alcor_services["test_setup"]["ip_addrs"] = ["172.16.1.101", "172.16.1.102"] +alcor_services["test_setup"]["container_names"] = ["con1", "con2"] + +// [L3_AttachRouter_then_CreatePorts] +alcor_services["L3_AttachRouter_then_CreatePorts"] = {} +alcor_services["L3_AttachRouter_then_CreatePorts"]["subnet_ids"] = [ + "8182a4d4-ffff-4ece-b3f0-8d36e3d88001", + "8182a4d4-ffff-4ece-b3f0-8d36e3d88002" +] +alcor_services["L3_AttachRouter_then_CreatePorts"]["cidrs"] = [ + "172.16.1.0/24", + "172.16.2.0/24" +] +alcor_services["L3_AttachRouter_then_CreatePorts"]["ip_addrs"] = [ + "172.16.1.101", + "172.16.2.201" +] +alcor_services["L3_AttachRouter_then_CreatePorts"]["subnet_names"] = [ + "subnet1", + "subnet2" +] 
+alcor_services["L3_AttachRouter_then_CreatePorts"]["device_ids"] = [ + "8182a4d4-ffff-4ece-b3f0-8d36e3d00001", + "8182a4d4-ffff-4ece-b3f0-8d36e3d00002" +] + +// [L2_basic] +alcor_services["L2_basic"] = {} +alcor_services["L2_basic"]["security_group_ids"] = [ + "3dda2801-d675-4688-a63f-dcda8d111111", + "3dda2801-d675-4688-a63f-dcda8d111112" +] +alcor_services["L2_basic"]["sg_names"] = ["sg1", "sg2"] +alcor_services["L2_basic"]["device_ids"] = [ + "8182a4d4-ffff-4ece-b3f0-8d36e3d00001", + "8182a4d4-ffff-4ece-b3f0-8d36e3d00002" +] + +// [gateways] +alcor_services["gateways"] = {} +alcor_services["gateways"]["gateway_info"] = [ + { "gw": "172.16.1.1", "ips": ["172.16.1.101", "172.16.1.102"] }, + { "gw": "172.16.2.1", "ips": ["172.16.2.201"] } +] + +// [vpc_info] +alcor_services["vpc_info"] = {} +alcor_services["vpc_info"]["vpc_info"] = { + "cidr": alcor_services["test_setup"]["cidr"], + "id": alcor_services["test_setup"]["vpc_id"], + "project_id": alcor_services["test_setup"]["project_id"] +} + +// [node_info] +alcor_services["node_info"] = {} +alcor_services["node_info"]["node_info"] = { + "node_id": alcor_services["test_setup"]["node_id"], + "node_name": alcor_services["test_setup"]["node_name"], + "server_port": 8080, + "veth": "eth0" +} + +// [subnet_info] +alcor_services["subnet_info"] = {} +alcor_services["subnet_info"]["subnet_info"] = { + "cidr": alcor_services["test_setup"]["cidr"], + "id": alcor_services["test_setup"]["subnet_id"], + "ip_version": 4, + "network_id": alcor_services["test_setup"]["network_id"], + "name": "subnet1", + "host_routes": [ + { + "destination": "172.16.1.0/24", + "nexthop": "172.16.1.1" + } + ] +} + +// [security_groups] +alcor_services["security_groups"] = {} +alcor_services["security_groups"]["security_group_info"] = { + "create_at": "string", + "description": "string", + "id": alcor_services["test_setup"]["security_group_id"], + "name": "sg1", + "project_id": alcor_services["test_setup"]["project_id"], + "security_group_rules": 
[], + "tenant_id": alcor_services["test_setup"]["tenant_id"], + "update_at": "string" +} + +// [port_info] +alcor_services["port_info"] = {} +alcor_services["port_info"]["port_info"] = { + "binding:host_id": alcor_services["test_setup"]["node_name"], + "device_id": alcor_services["test_setup"]["device_id"], + "fixed_ips": alcor_services["test_setup"]["ip_addrs"], + "subnet_id": alcor_services["test_setup"]["subnet_id"], + "id": alcor_services["test_setup"]["port_id"], + "name": alcor_services["test_setup"]["port_name"], + "network_id": alcor_services["test_setup"]["network_id"], + "project_id": alcor_services["test_setup"]["project_id"], + "security_groups": alcor_services["test_setup"]["security_group_id"], + "tenant_id": alcor_services["test_setup"]["tenant_id"] +} + +// [router_info] +alcor_services["router_info"] = {} +alcor_services["router_info"]["router_info"] = { + "name": "router1", + "owner": alcor_services["test_setup"]["vpc_id"], + "network_id": alcor_services["test_setup"]["network_id"], + "project_id": alcor_services["test_setup"]["project_id"], + "security_groups": alcor_services["test_setup"]["security_group_id"], + "tenant_id": alcor_services["test_setup"]["tenant_id"], + "id": "11112801-d675-4688-a63f-dcda8d327f50" +} + +// Test Controller Alcor HTTP APIs Test Params +alcor_services["TC"]={} +alcor_services["TC"]["vpm_ip"]="127.0.0.1" +alcor_services["TC"]["vpm_port"] = 8080 +alcor_services["TC"]["snm_ip"] = "127.0.0.1" +alcor_services["TC"]["snm_port"] = 8081 +alcor_services["TC"]["pm_ip"] = "127.0.0.1" +alcor_services["TC"]["pm_port"] = 8081 +alcor_services["TC"]["vpc_cidr_slash"] = 8 +alcor_services["TC"]["tenant_amount"] = 1 +alcor_services["TC"]["project_amount_per_tenant"] = 1 +alcor_services["TC"]["vpc_amount_per_project"] = 1 +alcor_services["TC"]["subnet_amount_per_vpc"] = 1 +alcor_services["TC"]["test_vpc_api"] = true +alcor_services["TC"]["test_subnet_api"] = true +alcor_services["TC"]["test_port_api"] = true 
+alcor_services["TC"]["call_api_rate"] = 100 + +export function get_alcor_services_info(key1, key2) { + if (alcor_services.hasOwnProperty(key1)) { + if (alcor_services[key1].hasOwnProperty(key2)) { + return alcor_services[key1][key2] + } + else { + return "key2 none" + } + } + else { + return "key1 none" + } +} + +export function get_service_port_map() { + let service_list = {} + if (alcor_services.hasOwnProperty("services")) { + for(let service_name in alcor_services["services"]) + { + let service_info = alcor_services["services"][service_name] + service_list[service_info["name"]] = service_info["port"] + } + } + // console.log("service_list: " + JSON.stringify(service_list)) + return service_list +} diff --git a/merak/testscripts/k6/create_test_cases.js b/merak/testscripts/k6/create_test_cases.js new file mode 100644 index 0000000..ef0bf33 --- /dev/null +++ b/merak/testscripts/k6/create_test_cases.js @@ -0,0 +1,355 @@ +// MIT License +// Copyright(c) 2020 Futurewei Cloud +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import { get_alcor_services_info, get_service_port_map } from './alcor_services.js' +import * as create_test_setup from './create_test_setup.js' +import { put_httprequest, post_httprequest, get_httprequest, get_mac_from_db } from './prepare_payload.js' + +// test_case is the scenario section header, must contain +// all of the subnet information. Different scenarios may use +// the same test_case setup so to avoid confusion, passin +// the scenario name which will appear in the test output. +export function create_subnets(snm_port, test_case, scenario) { + console.log(`creating subnets for scenario ${scenario}`); + let id_list = get_alcor_services_info(test_case, "subnet_ids") + let id_names = get_alcor_services_info(test_case, "subnet_names") + let device_ids = get_alcor_services_info(test_case, "device_ids") + let cidrs = get_alcor_services_info(test_case, "cidrs") + let ip_addrs = get_alcor_services_info(test_case, "ip_addrs") + let subnetinfo = get_alcor_services_info("subnet_info", "subnet_info") + + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${snm_port}/project/${projectid}/subnets` + console.log("url: " + url) + + let length = Math.min(cidrs.length, id_list.length, id_names.length) + let subnet = {} + for (let idx = 0; idx < length; idx++) { + subnetinfo['cidr'] = cidrs[idx] + subnetinfo['id'] = id_list[idx] + subnetinfo['name'] = id_names[idx] + subnet["subnet"] = subnetinfo + post_httprequest(url, subnet) + } + console.log("verifying created subnets") + let res = get_httprequest(url); + console.log(JSON.stringify(res)); + console.log(`FINISH: created subnets for scenario ${scenario}`) + return { "id_list": id_list, "device_ids": 
device_ids, "ip_addrs": ip_addrs } +} + + +export function create_security_groups(port) { + console.log("Creating security groups") + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/security-groups` + console.log("url: " + url) + + let sginfo = get_alcor_services_info("security_groups", "security_group_info") + let id_list = get_alcor_services_info("L2_basic", "security_group_ids") + let id_names = get_alcor_services_info("L2_basic", "sg_names") + let device_ids = get_alcor_services_info("L2_basic", "device_ids") + + let length = Math.min(device_ids.length, id_list.length, id_names.length) + let security_groups = {} + for (let idx = 0; idx < length; idx++) { + sginfo['id'] = id_list[idx] + sginfo['name'] = id_names[idx] + security_groups["security_group"] = sginfo + console.log("SG ", JSON.stringify(security_groups)) + post_httprequest(url, security_groups) + } + console.log("FINISH: creating security groups"); + return { "id_list": id_list, "device_ids": device_ids } +} + +export function attach_subnets_to_router(rm_port, snm_port, router_id, subnet_id_list) { + console.log("Attaching subnets to router") + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${rm_port}/project/${projectid}/routers/${router_id}/add_router_interface` + + for (let id in subnet_id_list) { + let subnet_info = { "subnet_id": id } + put_httprequest(url, subnet_info) + } + + let req = `http://localhost:${rm_port}/project/${projectid}/routers` + console.log("Attached router info") + let res = get_httprequest(req) + console.log(JSON.stringify(res)) + console.log("FINISH: attaching subnets to router") +} + +export function create_port_goal_states(port, change_ports) { + console.log("Creating goal state...") + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/ports` + + let port_dict = 
get_alcor_services_info("port_info", "port_info") + let port_name = port_dict['name'] + let port_id = port_dict['id'] + let node_name = port_dict['binding:host_id'] + + let subnet_ids = [] + let security_groups = [] + let ip_addrs = [] + let changes = change_ports['change'] + + let device_ids = [] + if ('subnet_id' in changes) { + subnet_ids = change_ports['subnet_id'] + } + else { + // Adding the same subnet ID twice because it is going to be same for two ports we are creating + subnet_ids.push(port_dict['subnet_id']) + subnet_ids.push(port_dict['subnet_id']) + } + + if ('device_id' in changes) { + device_ids = change_ports['device_ids'] + } + else { + console.log("Adding same device id twice...") + // Adding the same device ID twice because it is going to be same for two ports we are creating + device_ids.push(port_dict['device_id']) + device_ids.push(port_dict['device_id']) + } + + if ('security_groups' in changes) { + security_groups = change_ports['security_groups'] + } + else { + // Adding the same security group ID twice because it is going to be same for two ports we are creating + security_groups.push(port_dict['security_groups']) + security_groups.push(port_dict['security_groups']) + } + + if ('ip_addrs' in changes) { + ip_addrs = change_ports['ip_addrs'] + } + else { + ip_addrs = port_dict['fixed_ips'] + } + + for (let index = 0; index < ip_addrs.length; index++) { + let ports = {} + let port_info = { + "admin_state_up": true, + "allowed_address_pairs": [ + { + "ip_address": "11.11.11.11", + "mac_address": "00-AA-BB-15-EB-3F" + } + ], + "binding:host_id": node_name[index], + "binding:vif_details": {}, + "create_at": "string", + "description": "string", + "device_id": device_ids[index], + "device_owner": "compute:nova", + "dns_assignment": {}, + "dns_domain": "string", + "dns_name": "string", + "extra_dhcp_opts": [ + { + "ip_version": "string", + "opt_name": "string", + "opt_value": "string" + } + ], + "fast_path": true, + "fixed_ips": [ + { + 
"ip_address": ip_addrs[index], + "subnet_id": subnet_ids[index] + } + ], + "id": port_id[index], + "mac_learning_enabled": true, + "name": port_name[index], + "network_id": port_dict['network_id'], + "network_ns": "string", + "port_security_enabled": true, + "project_id": port_dict['project_id'], + "qos_network_policy_id": "string", + "qos_policy_id": "string", + "revision_number": 0, + "security_groups": [security_groups[index]], + "tags": ["string"], + "tenant_id": port_dict['tenant_id'], + "update_at": "string", + "uplink_status_propagation": true, + "veth_name": "string" + } + ports["port"] = port_info + console.log("ports: " + JSON.stringify(ports)) + console.log("url: " + url) + console.log("Posting goal state") + post_httprequest(url, ports) + } + console.log("FINISH: creating goal state...") +} + + +// Test case 1: L2 Basic +// Two nodes in same subnet in different seurity groups +export function prepare_test_L2_basic(ip_mac, ser_port) { + let test_name = "L2_basic" + console.log(`Preparing test case ${test_name}...`) + + create_test_setup.create_default_segment_table(ser_port["sgs"]) + create_test_setup.create_vpc(ser_port["vpm"]) + create_test_setup.create_node(ser_port["nm"], ip_mac) + create_test_setup.create_subnet(ser_port["snm"]) + let res = create_security_groups(ser_port["sgm"]) + let id_list = res["id_list"] + let device_ids = res["device_ids"] + let change_ports = { + "change": ["security_groups", "device_id"], + "security_groups": id_list, + "device_ids": device_ids + } + create_port_goal_states(ser_port["pm"], change_ports) + let ip_mac_db = get_mac_from_db() + console.log(`Test case ${test_name}. 
IP/MAC in ignite DB: ` + + JSON.stringify(ip_mac_db)) + console.log(`FINISH: preparing test case ${test_name}...`) + return ip_mac_db +} + +// Test case 2: L3_AttachRouter_then_CreatePorts (S4) +// Two nodes in different subnets, in same same sg +// Order of network element creation is: +// 1) Create default segement table +// 2) Create nodes +// 3) Create VPC +// 4) Create security group +// 5) Create create subnets +// 6) Create router +// 7) Attach subnets to router +// 8) Create ports +export function prepare_test_L3_AttachRouter_then_CreatePorts(ip_mac, ser_port) { + let test_name = "L3_AttachRouter_then_CreatePorts" + console.log(`Preparing test case ${test_name}...`) + + create_test_setup.create_default_segment_table(ser_port["sgs"]) + create_test_setup.create_node(ser_port["nm"], ip_mac) + + let change = { 'change': 'cidr', 'cidr': "172.16.0.0/16" } + create_test_setup.create_vpc(ser_port["vpm"], change) + create_test_setup.create_security_group(ser_port["sgm"]) + + // create router + let router_id = create_test_setup.create_router_interface(ser_port["rm"]) + create_test_setup.get_vpcs(ser_port["vpm"]) + + // create subnets + // Relevant subnet info from L3_AttachRouter_then_CreatePorts (S4) + let res = create_subnets(ser_port["snm"], test_name, "S4") + let id_list = res["id_list"] + let device_ids = res["device_ids"] + let ip_addrs = res["ip_addrs"] + + // attach subnets to router + attach_subnets_to_router(ser_port["rm"], ser_port["snm"], router_id, id_list) + create_test_setup.get_subnets(ser_port["snm"]) + let change_ports = { + "change": [ + "subnet_id", + "device_id", + "ip_addrs" + ], + "subnet_id": id_list, + "device_ids": device_ids, + "ip_addrs": ip_addrs + } + create_port_goal_states(ser_port["pm"], change_ports) + + let ip_mac_db = get_mac_from_db() + console.log(`Test case ${test_name}. 
IP/MAC in ignite DB: ` + + JSON.stringify(ip_mac_db)) + console.log(`FINISH: preparing test case ${test_name}...`) + return ip_mac_db +} + +// test case 3: L3_CreatePorts_then_AttachRouter (S5) +// Two nodes in different subnets and same security group but +// Order of network element creation is: +// 1) Create default segement table +// 2) Create nodes +// 3) Create VPC +// 4) Create security group +// 5) Create create subnets +// 6) Create ports +// 7) Create router +// 8) Attach subnets to router +export function prepare_test_L3_CreatePorts_then_AttachRouter(ip_mac, ser_port) { + let test_name = "L3_CreatePorts_then_AttachRouter" + console.log(`Preparing test case ${test_name}...`) + + create_test_setup.create_default_segment_table(ser_port["sgs"]) + create_test_setup.create_node(ser_port["nm"], ip_mac) + let change = { 'change': 'cidr', 'cidr': "172.16.0.0/16" } + create_test_setup.create_vpc(ser_port["vpm"], change) + create_test_setup.get_vpcs(ser_port["vpm"]) + create_test_setup.create_security_group(ser_port["sgm"]) + + // create subnets + // Relevant subnet info from L3_AttachRouter_then_CreatePorts (S4) + let res = create_subnets(ser_port["snm"], + "L3_AttachRouter_then_CreatePorts", "S5") + + let id_list = res["id_list"] + let device_ids = res["device_ids"] + let ip_addrs = res["ip_addrs"] + + create_test_setup.get_subnets(ser_port["snm"]) + + // create ports + let change_ports = { + "change": [ + "subnet_id", + "device_id", + "ip_addrs" + ], + "subnet_id": id_list, + "device_ids": device_ids, + "ip_addrs": ip_addrs + } + create_port_goal_states(ser_port["pm"], change_ports) + + // create router + let router_id = create_test_setup.create_router_interface(ser_port["rm"]) + + // attach subnets to router + attach_subnets_to_router(ser_port["rm"], ser_port["snm"], router_id, id_list) + + let ip_mac_db = get_mac_from_db() + console.log(`Test case ${test_name}. 
IP/MAC in ignite DB: ` + + JSON.stringify(ip_mac_db)) + console.log(`FINISH: preparing test case ${test_name}...`) + return ip_mac_db +} + +export default function () { + let ip_mac = [{ "ip": "172.16.62.212", "mac": "a4:ae:12:79:c9:81" }, { "ip": "172.16.62.213", "mac": "a4:ae:12:79:5a:27" }] + let ser_port = get_service_port_map() + prepare_test_L2_basic(ip_mac, ser_port) +// prepare_test_L3_AttachRouter_then_CreatePorts(ip_mac, ser_port) +// prepare_test_L3_CreatePorts_then_AttachRouter(ip_mac, ser_port) +} + diff --git a/merak/testscripts/k6/create_test_setup.js b/merak/testscripts/k6/create_test_setup.js new file mode 100644 index 0000000..d8fea87 --- /dev/null +++ b/merak/testscripts/k6/create_test_setup.js @@ -0,0 +1,296 @@ +// MIT License +// Copyright(c) 2020 Futurewei Cloud +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ +import { sleep } from 'k6'; +import { get_alcor_services_info, get_service_port_map } from './alcor_services.js' +import { post_httprequest, get_httprequest, get_mac_from_db } from './prepare_payload.js' + +export function create_default_segment_table(port) { + console.log("Create default segment table"); + let url = `http://localhost:${port}/segments/createDefaultTable`; + sleep(3); + post_httprequest(url) + console.log("FINISH: Created default segment table\n"); +} + +export function create_node(port, ip_mac) { + console.log("Creating nodes"); + let url = `http://localhost:${port}/nodes`; + console.log("url: " + url) + let data = {} + let nodeinfo = get_alcor_services_info("node_info", "node_info") + // console.log(JSON.stringify(nodeinfo)) + let node_name = nodeinfo['node_name'] + let node_id = nodeinfo['node_id'] + // console.log(node_name,node_id) + + // let ip_mac =[{"ip":"1.1.1.1","mac":"123"},{"ip":"2.2.2.2","mac":"345"}] + for (let key_index = 0; key_index < ip_mac.length; key_index++) { + let node_info = { + "local_ip": ip_mac[key_index]["ip"], + "mac_address": ip_mac[key_index]["mac"], + "node_id": node_id[key_index], + "node_name": node_name[key_index], + "server_port": nodeinfo['server_port'], + "veth": nodeinfo['veth'] + } + data["host_info"] = node_info + post_httprequest(url, data) + } + console.log("FINISH: Created nodes\n") +} + +export function create_router_interface(port) { + console.log("Creating router interface\n") + let router = {} + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/routers` + console.log("url: " + url) + + let routerinfo = get_alcor_services_info("router_info", "router_info") + let route_info = { + "admin_state_up": true, + "availability_zone_hints": ["string"], + "availability_zones": ["string"], + "conntrack_helpers": ["string"], + "description": "string", + "distributed": true, + "external_gateway_info": { + "enable_snat": true, + 
"external_fixed_ips": [], + "network_id": routerinfo['network_id'] + }, + "flavor_id": "string", + "gateway_ports": [], + "ha": true, + "id": routerinfo['id'], + "name": routerinfo['name'], + "owner": routerinfo['owner'], + "project_id": routerinfo['project_id'], + "revision_number": 0, + "routetable": {}, + "service_type_id": "string", + "status": "BUILD", + "tags": ["string"], + "tenant_id": routerinfo['tenant_id'] + } + router['router'] = route_info + post_httprequest(url, router) + console.log("FINISH: Created router interface\n") + return routerinfo['id'] +} + +export function create_vpc(port, change = {}) { + console.log("Creating VPC\n") + let network = {} + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/vpcs` + console.log("url: " + url) + + let networkinfo = get_alcor_services_info("vpc_info", "vpc_info") + if ('change' in change) { + networkinfo[change['change']] = change[change['change']] + } + let network_info = { + "admin_state_up": true, + "revision_number": 0, + "cidr": networkinfo['cidr'], + "default": true, + "description": "vpc", + "dns_domain": "domain", + "id": networkinfo['id'], + "is_default": true, + "mtu": 1400, + "name": "sample_vpc", + "port_security_enabled": true, + "project_id": networkinfo['project_id'] + } + network["network"] = network_info + post_httprequest(url, network) + console.log("FINISH: Created VPC\n") +} + +export function create_subnet(port) { + console.log("Creating Subnet\n") + let subnet = {} + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/subnets` + console.log("url: " + url) + + let subnetinfo = get_alcor_services_info("subnet_info", "subnet_info") + subnet["subnet"] = subnetinfo + post_httprequest(url, subnet) + console.log("FINISH: Creating Subnet\n") +} + +export function create_security_group(port) { + console.log("Creating security group") + let 
security_groups = {} + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/security-groups` + console.log("url: " + url) + + let sginfo = get_alcor_services_info("security_groups", "security_group_info") + security_groups["security_group"] = sginfo + post_httprequest(url, security_groups) + console.log("FINISH: Created security group\n") +} + +export function get_subnets(port) { + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/subnets` + console.log("url: " + url) + + let res = get_httprequest(url) + console.log(JSON.stringify(res)); +} + +export function get_nodes(port) { + let url = `http://localhost:${port}/nodes` + console.log("url: " + url) + + let res = get_httprequest(url) + console.log(JSON.stringify(res)); +} + +export function get_vpcs(port) { + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/vpcs` + console.log("url: " + url) + + let res = get_httprequest(url) + console.log(JSON.stringify(res)); +} + +export function get_ports(port) { + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/ports` + console.log("url: " + url) + + let res = get_httprequest(url) + console.log(JSON.stringify(res)); +} + +export function get_security_group(port) { + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/security-groups` + console.log("url: " + url) + + let res = get_httprequest(url) + console.log(JSON.stringify(res)); +} + + +export function create_ports(port) { + console.log("Creating Goal State Ports"); + let projectid = get_alcor_services_info("test_setup", "project_id") + let url = `http://localhost:${port}/project/${projectid}/ports` + console.log("url: " + url) + + let portinfo = 
get_alcor_services_info("port_info", "port_info") + let port_name = portinfo['name'] + let port_id = portinfo['id'] + let ip_addrs = portinfo['fixed_ips'] + let node_name = portinfo['binding:host_id'] + let device_id = portinfo['device_id'] + + for (let index = 0; index < ip_addrs.length; index++) { + let ports = {} + let port_info = { + "admin_state_up": true, + "allowed_address_pairs": [{ + "ip_address": "11.11.11.1", + "mac_address": "00-AA-BB-15-EB-3F" + }], + "binding:host_id": node_name[index], + "binding:vif_details": {}, + "create_at": "string", + "description": "string", + "device_id": device_id[index], + "device_owner": "compute:nova", + "dns_assignment": {}, + "dns_domain": "string", + "dns_name": "string", + "extra_dhcp_opts": [{ + "ip_version": "string", + "opt_name": "string", + "opt_value": "string" + }], + "fast_path": true, + "fixed_ips": [{ + "ip_address": ip_addrs[index], + "subnet_id": portinfo['subnet_id'] + }], + "id": port_id[index], + "mac_learning_enabled": true, + "name": port_name[index], + "network_id": portinfo['network_id'], + "network_ns": "string", + "port_security_enabled": true, + "project_id": portinfo['project_id'], + "qos_network_policy_id": "string", + "qos_policy_id": "string", + "revision_number": 0, + "security_groups": [ + portinfo['security_groups'] + ], + "tags": ["string"], + "tenant_id": portinfo['tenant_id'], + "update_at": "string", + "uplink_status_propagation": true, + "veth_name": "string" + } + ports["port"] = port_info + post_httprequest(url, ports) + } + console.log("FINISH: Created Goal State Ports\n") +} + + +export function create_test_setup(ip_mac) { + console.log("Calling Alcor APIs to generate Goal States\n") + let service_port_map = get_service_port_map() + console.log("service_port_map: " + JSON.stringify(service_port_map)) + + create_default_segment_table(service_port_map["sgs"]) + create_vpc(service_port_map["vpm"]) + create_node(service_port_map["nm"], ip_mac) + create_subnet(service_port_map["snm"]) 
+ + create_security_group(service_port_map["sgm"]) + create_ports(service_port_map["pm"]) + + let ip_mac_db = get_mac_from_db() + console.log("Goal State IP/MACs: " + JSON.stringify(ip_mac_db)) + return ip_mac_db +} + +// simple test +export default function() { + // let ip_mac = [{ "ip": "172.16.62.212", "mac": "a4:ae:12:79:c9:81" }, { "ip": "172.16.62.213", "mac": "a4:ae:12:79:5a:27" }] + // create_test_setup(ip_mac) + let service_port_map = get_service_port_map() + get_subnets(service_port_map["snm"]) + get_nodes(service_port_map["nm"]) + get_vpcs(service_port_map["vpm"]) + get_ports(service_port_map["pm"]) +} diff --git a/merak/testscripts/k6/get_test.js b/merak/testscripts/k6/get_test.js new file mode 100644 index 0000000..96d19f3 --- /dev/null +++ b/merak/testscripts/k6/get_test.js @@ -0,0 +1,32 @@ +// MIT License +// Copyright(c) 2020 Futurewei Cloud +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +import { get_service_port_map } from './alcor_services.js' +import { get_subnets, get_nodes, get_vpcs, get_ports, get_security_group} from './create_test_setup.js' + +// simple test +export default function() { + + let service_port_map = get_service_port_map() + get_subnets(service_port_map["snm"]) + get_nodes(service_port_map["nm"]) + get_vpcs(service_port_map["vpm"]) + get_ports(service_port_map["pm"]) + get_security_group(service_port_map["sgm"]) +} + \ No newline at end of file diff --git a/merak/testscripts/k6/helper_functions.js b/merak/testscripts/k6/helper_functions.js new file mode 100644 index 0000000..db865a0 --- /dev/null +++ b/merak/testscripts/k6/helper_functions.js @@ -0,0 +1,297 @@ +// MIT License +// Copyright(c) 2020 Futurewei Cloud +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +export function uuid() { + var s = [] + var hexDigits = '0123456789abcdef' + for (var i = 0; i < 36; i++) { + s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1) + } + s[14] = '4' // bits 12-15 of the time_hi_and_version field to 0010 + s[19] = hexDigits.substr((s[19] & 0x3) | 0x8, 1) // bits 6-7 of the clock_seq_hi_and_reserved to 01 + s[8] = s[13] = s[18] = s[23] = '-' + + var uuid = s.join('') + return uuid +} + +function genMAC(){ + var hexDigits = '0123456789abcdef'; + var macAddress = ""; + console.log("in") + for (var i = 0; i < 6; i++) { + macAddress+=hexDigits.charAt(Math.round(Math.random() * 15)); + macAddress+=hexDigits.charAt(Math.round(Math.random() * 15)); + if (i != 5) macAddress += ":"; + } + return macAddress; +} + +// Verify the regularity of IP +var ip_reg = /^(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/; + +// Verify the regularity of the subnet mask +var mask_reg = /^(254|252|248|240|224|192|128|0)\.0\.0\.0|255\.(254|252|248|240|224|192|128|0)\.0\.0|255\.255\.(254|252|248|240|224|192|128|0)\.0|255\.255\.255\.(254|252|248|240|224|192|128|0)$/; + +/*** Convert IP address to binary format + *  @param string   ip    IP address to be converted + */ +export function ip_to_binary(ip) { + if (ip_reg.test(ip)) { + let ip_str = "" + let ip_arr = ip.split(".") + + for (var i = 0; i < 4; i++) { + let curr_num = ip_arr[i]; + let number_bin = parseInt(curr_num); + number_bin = number_bin.toString(2); + let count = 8 - number_bin.length; + for (var j = 0; j < count; j++) { + number_bin = "0" + number_bin; + } + ip_str += number_bin; + } + return ip_str; + } + + 
return ''; +} + +/***  Convert binary format to IP address + *  @param string   binary    Binary to be converted   + */ +export function binary_to_ip(binary) { + if (binary.length == 32) { + let a = parseInt(binary.substr(0, 8), 2); + let b = parseInt(binary.substr(8, 8), 2); + let c = parseInt(binary.substr(16, 8), 2); + let d = parseInt(binary.slice(-8), 2); + + return a + '.' + b + '.' + c + '.' + d; + } + + return ''; +} + + +/*** Calculate the network address and broadcast address + * according to the subnet mask and gateway + * @param string    ip + *  @param string   mask     + */ +export function get_network_broadcast_addr(ip, mask) { + let network_broadcast = {}; + let network_addr = ""; + + let mask_arr = mask.split("."); + let ip_arr = ip.split("."); + + // Calculate the network address + for (var i = 0; i < 4; i++) { + let number1 = parseInt(mask_arr[i]); + let number2 = parseInt(ip_arr[i]); + network_addr += number1 & number2; + if (i < 3) { + network_addr += "."; + } + } + network_broadcast["network_addr"] = network_addr; + + // Calculate broadcast address + let mask_binary = ip_to_binary(mask); + let gateway_binary = ip_to_binary(ip); + + let mask_zero = mask_binary.split(0).length - 1; + let one_number = new Array(mask_zero + 1).join('1'); // IP地址后位补1 + let broadcast_binary = gateway_binary.slice(0, -mask_zero) + one_number; + + network_broadcast["broadcast_addr"] = binary_to_ip(broadcast_binary); + + return network_broadcast; +} + +// Full Permutation and combination algorithm +function doExchange(doubleArrays) { + let len = doubleArrays.length; + if (len >= 2) { + let len1 = doubleArrays[0].length; + let len2 = doubleArrays[1].length; + let newlen = len1 * len2; + let temp = new Array(newlen); + let index = 0; + for (let i = 0; i < len1; i++) { + for (let j = 0; j < len2; j++) { + temp[index] = doubleArrays[0][i] + '.' 
+ doubleArrays[1][j]; + index++; + } + } + + let newArray = new Array(len - 1); + for (let i = 2; i < len; i++) { + newArray[i - 1] = doubleArrays[i]; + } + newArray[0] = temp; + + return doExchange(newArray); + + } else { + return doubleArrays[0]; + } +} + +/*** Obtain all IP combinations composed of network address and broadcast address + *  @param  string    network_addr     + *  @param  string    broadcast_addr   + *  @param  string    gateway + */ +export function return_ip(network_addr, broadcast_addr, gateway) { + let range = []; + let start = network_addr.split("."); + let end = broadcast_addr.split("."); + + for (let i = 0; i < 4; i++) { + if (start[i] == end[i]) { + range[i] = [start[i]]; + } else { + let min = Math.min(start[i], end[i]); + let max = Math.max(start[i], end[i]); + let temp = []; + for (let j = min; j <= max; j++) { + temp.push(j); + } + range[i] = temp; + } + } + + let ip_list = doExchange(range); + + // ip_list.shift(); // Remove network address + // ip_list.pop(); // Remove broadcast address + // let gateway_index = -1; + + // // Remove gateway + // for (let k = 0; k < ip_list.length; k++) { + // if (ip_list[k] == gateway) { + // gateway_index = k; + // break; + // } + // } + // if (gateway_index > -1) { + // ip_list.splice(gateway_index, 1); + // } + + // //Remove + // let localhost_index = -1; + // for (let k = 0; k < ip_list.length; k++) { + // if (ip_list[k] == "127.0.0.1") { + // localhost_index = k; + // break; + // } + // } + // if (localhost_index > -1) { + // ip_list.splice(localhost_index, 1); + // } + + // //Remove 0.0.0.0 + // let all_zero_index = -1; + // for (let k = 0; k < ip_list.length; k++) { + // if (ip_list[k] == "0.0.0.0") { + // all_zero_index = k; + // break; + // } + // } + // if (all_zero_index > -1) { + // ip_list.splice(all_zero_index, 1); + // } + + return ip_list; +} + +/*** Obtain all IP combinations composed of network address and broadcast address + *  @param  string    cidr_network_addr    + */ +export 
function get_cidr_network_address_range(cidr_network_addr) { + let network_address = cidr_network_addr.split("/")[0]; + // console.log("network_address: " + network_address + " " + typeof (network_address)) + let mask_len = Number(cidr_network_addr.split("/")[1]); + // console.log("mask_len: " + mask_len + " " + typeof (mask_len)) + let mask_binary = "" + for (let i = 0; i < 32; i++) { + if (i < mask_len) { + mask_binary += "1" + } else { + mask_binary += "0" + } + } + let mask = binary_to_ip(mask_binary) + console.log("mask: " + mask) + return return_ip(network_address, mask) +} + +// Deep copy of object +export function cloneObj(obj) { + if (typeof obj !== 'object') { + return obj; + } else { + var newobj = obj.constructor === Array ? [] : {}; + for (var i in obj) { + newobj[i] = typeof obj[i] === 'object' ? cloneObj(obj[i]) : obj[i]; + } + return newobj; + } +} + + +// K6 test function +export default function() { + let res; + // res = uuid() + // console.log(res) + + // let ip = "127.0.0.0" + // let ip2 = ip_to_binary(ip) + // console.log(ip2) + // ip = binary_to_ip(ip2) + // console.log(ip) + + // let mask = "255.255.255.0" + // let network_broadcast = get_network_broadcast_addr(ip, mask) + // console.log(JSON.stringify(network_broadcast)) + + // res = return_ip(network_broadcast["network_addr"], network_broadcast["broadcast_addr"]) + // console.log(JSON.stringify(res)) + + // let gateway = "127.0.0.254" + // res = return_ip(network_broadcast["network_addr"], network_broadcast["broadcast_addr"], gateway) + // console.log(JSON.stringify(res)) + + // res = get_cidr_network_address_range("10.0.0.0/8") + + // let A = { "B": 1234, "c": [1, 2, 3, 4], "D": { "E": true } } + // let F = cloneObj(A) + // console.log(JSON.stringify(A)) + // console.log(JSON.stringify(F)) + // A["B"] = 88888 + // console.log(JSON.stringify(A)) + // console.log(JSON.stringify(F)) + + res = genMAC() + console.log(res) + + +} \ No newline at end of file diff --git 
a/merak/testscripts/k6/k6-test/K6-test.md b/merak/testscripts/k6/k6-test/K6-test.md new file mode 100644 index 0000000..13764ab --- /dev/null +++ b/merak/testscripts/k6/k6-test/K6-test.md @@ -0,0 +1,153 @@ +# MIT License +``` +Copyright(c) 2020 Futurewei Cloud +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files(the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +``` + +# K6 Test + +# In this README: +- [MIT License](#mit-license) +- [K6 Test](#k6-test) +- [In this README:](#in-this-readme) + - [Introduction](#introduction) + - [Directory Structure and Files](#directory-structure-and-files) + - [Test Environment Setup](#test-environment-setup) + - [Running Test](#running-test) + + +## Introduction +This is an end to end test based on the K6 scripts rewritten by busybox-ping-test or TC scripts. + + +## Directory Structure and Files +alcor-int/merak/testscripts/k6 +1. alcor_services.js +3. helper_functions.js +4. prepare_payload.js +5. create_test_setup.js +6. create_test_cases.js +7. 
alcor_http_api_test.js + +## Test Environment Setup +We need 3 clean PCs to deploy Alcor, ACA and K6. + +1. Building Alcor + - We choose a PC to compile and run Alcor, please fork Alcor Github repository by clicking the Fork button on the upper right-hand side of Alcor home page and make a local clone + ``` + $ git clone https://github.com/<username>/alcor.git ~/alcor + $ cd ~/alcor + $ git submodule update --init --recursive + $ git remote add upstream https://github.com/futurewei-cloud/alcor.git + $ git pull upstream master + ``` + - Run mvn command to clean install the project for development. You could expect to see BUILD SUCCESS at the end of the build console. + ``` + $ mvn clean install + ``` + - Deploy an Ignite database for local testing and run unit tests. If this step passes, then you have everything needed to develop, test, and run Alcor. + ``` + $ ./scripts/test-prep.sh + $ mvn test + ``` + - Create all the required images for Alcor + ``` + $ ./scripts/build.sh + $ ./scripts/alcor_services.sh -b + $ ./scripts/alcor_services.sh -a + ``` + - Now, execute ```docker ps```, and we can see + ![avatar](./fig/dockerps.png) + +2. 
Building ACA + - Other PCs are used to compile and run ACA, please fork Alcor Control Agent Github repository by clicking the Fork button on the upper right-hand side of Alcor Control Agent home page and make a local clone + ``` + $ git clone --recurse-submodules https://github.com//alcor-control-agent.git ~/alcor-control-agent + $ cd ~/alcor-control-agent + $ git submodule update --init --recursive + $ git remote add upstream https://github.com/futurewei-cloud/alcor-control-agent.git + $ git pull upstream master + ``` + - Setup a physical machine or VM to compile the alcor-control-agent + ``` + $ cd ~/alcor-control-agent/build + $ sudo ./aca-machine-init.sh + ``` + - Install OVS in ubuntu (18.04) if needed: + ``` + $ apt install openvswitch-switch + ``` + - You can run the test (optional): + ``` + $ ./build/tests/aca_tests + ``` + - You should be ready to run the executable: + ``` + $ ./build/bin/AlcorControlAgent + ``` +PS: There are a large number of downloads in aca-machine-init.sh, which will be downloaded again every time. This may be very unfriendly to users in Chinese Mainland. In this end, we refactor a script named aca-machine-init-local.sh. +``` +https://github.com/VanderChen/alcor-control-agent/blob/build-shell/build/aca-machine-init-local.sh +``` +In Chinese Mainland, we recommend the following ways to setup a physical machine or VM to compile the alcor-control-agent. + ``` + $ cd ~/alcor-control-agent/build + $ mkdir dependencis + $ sudo ./aca-machine-init-local.sh -D ./dependencis + $ cp -r dependencis /var/local/git + $ cd /var/local/git/dependencis + $ mv * ../ + $ cd ~/alcor-control-agent/build + $ sudo ./aca-machine-init-local.sh -i + ``` +Note that we split the download and installation steps so that you don't have to download every time + + +3. Building K6 +- We can deploy K6 on the machine where Alcor is deployed. 
+``` + $ sudo gpg --no-default-keyring --keyring /usr/share/keyrings/k6-archive-keyring.gpg --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys C5AD17C747E3415A3642D57D77C6C491D6AC1D69 + $ echo "deb [signed-by=/usr/share/keyrings/k6-archive-keyring.gpg] https://dl.k6.io/deb stable main" | sudo tee /etc/apt/sources.list.d/k6.list + $ sudo apt-get update + $ sudo apt-get install k6 +``` + +## Running Test +1. busybox-ping-test +- We first use busybox-ping-test to verify whether our Alcor and ACA deployments are correct.You can optionally provide the paramter "-b build" to build all the docker images of the alcor services. This step need not be followed for any subsequent tests, unless any changes are made in Alcor. +``` + $ ./busybox_ping_test.py -b build +``` +- Next, configure alcor_services.ini and perform a testcase test +``` + $ ./busybox_ping_test.py -t 1 +``` +![avatar](./fig/pingtest.png) + +PS: The busybox-ping-test script will delete the ACA on the host each time and download, install it again. This part of the code can be commented out. + +2. k6 test +- K6 test relies on busybox-ping-test. We can divide the ```main()``` in ping_test.py into three stages to execute: + + - Step 1: read the configuration file, restart the Alcor service, and restart the ACA on the host. + - Step 2: use K6 run to instead prepare_test_xxx or create_test_setup in ping_test.py + - Step 3: busybox_container_deploy and run_ping_test + +3. 
Test example +- create_test_setup.js test +![avatar](./fig/k6-test-example-1.png) +![avatar](./fig/k6-test-example-2.png) +![avatar](./fig/k6-test-example-3.png) \ No newline at end of file diff --git a/merak/testscripts/k6/k6-test/fig/dockerps.png b/merak/testscripts/k6/k6-test/fig/dockerps.png new file mode 100644 index 0000000..3d580bf Binary files /dev/null and b/merak/testscripts/k6/k6-test/fig/dockerps.png differ diff --git a/merak/testscripts/k6/k6-test/fig/k6-test-example-1.png b/merak/testscripts/k6/k6-test/fig/k6-test-example-1.png new file mode 100644 index 0000000..a0da780 Binary files /dev/null and b/merak/testscripts/k6/k6-test/fig/k6-test-example-1.png differ diff --git a/merak/testscripts/k6/k6-test/fig/k6-test-example-2.png b/merak/testscripts/k6/k6-test/fig/k6-test-example-2.png new file mode 100644 index 0000000..970751a Binary files /dev/null and b/merak/testscripts/k6/k6-test/fig/k6-test-example-2.png differ diff --git a/merak/testscripts/k6/k6-test/fig/k6-test-example-3.png b/merak/testscripts/k6/k6-test/fig/k6-test-example-3.png new file mode 100644 index 0000000..ba53559 Binary files /dev/null and b/merak/testscripts/k6/k6-test/fig/k6-test-example-3.png differ diff --git a/merak/testscripts/k6/k6-test/fig/pingtest.png b/merak/testscripts/k6/k6-test/fig/pingtest.png new file mode 100644 index 0000000..ea98ad7 Binary files /dev/null and b/merak/testscripts/k6/k6-test/fig/pingtest.png differ diff --git a/merak/testscripts/k6/luyao.js b/merak/testscripts/k6/luyao.js new file mode 100644 index 0000000..6d3fe32 --- /dev/null +++ b/merak/testscripts/k6/luyao.js @@ -0,0 +1,428 @@ +import { sleep } from 'k6'; +import { post_httprequest, get_httprequest, put_httprequest } from './prepare_payload.js' + + + +export function uuid() { + var s = [] + var hexDigits = '0123456789abcdef' + for (var i = 0; i < 36; i++) { + s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1) + } + s[14] = '4' // bits 12-15 of the time_hi_and_version field to 0010 + 
s[19] = hexDigits.substr((s[19] & 0x3) | 0x8, 1) // bits 6-7 of the clock_seq_hi_and_reserved to 01 + s[8] = s[13] = s[18] = s[23] = '-' + + var uuid = s.join('') + return uuid +} + +function genMAC(){ + var hexDigits = '0123456789abcdef'; + var macAddress = ""; + for (var i = 0; i < 6; i++) { + macAddress+=hexDigits.charAt(Math.round(Math.random() * 15)); + macAddress+=hexDigits.charAt(Math.round(Math.random() * 15)); + if (i != 5) macAddress += ":"; + } + return macAddress; +} + + +// Using vpc_manager (port 9001) create a default segment table for admin purposes +export function create_default_segment_table() { + console.log("Create default segment table"); + let url = "http://localhost:9001/segments/createDefaultTable"; + sleep(3); + post_httprequest(url) + console.log("FINISH: Created default segment table\n"); +} + +// Using vpc_manager (port 9001), create a VPC +// project ID: 3dda2801-d675-4688-a63f-dcda8d327f50 +// ID: 9192a4d4-ffff-4ece-b3f0-8d36e3d88001 (possibly network ID) +export function create_vpc() { + console.log("Creating VPC\n") + let network = {} + let url = "http://localhost:9001/project/3dda2801-d675-4688-a63f-dcda8d327f50/vpcs" + + let network_info = { + "admin_state_up": true, + "revision_number": 0, + "cidr": "10.0.0.0/16", + "default": true, + "description": "vpc", + "dns_domain": "domain", + "id": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001", + "is_default": true, + "mtu": 1400, + "name": "sample_vpc", + "port_security_enabled": true, + "project_id": "3dda2801-d675-4688-a63f-dcda8d327f50" + } + network["network"] = network_info + sleep(3); + post_httprequest(url, network); + console.log("FINISH: Created VPC\n") +} + +// { "ip": "172.16.62.212", "mac": "a4:ae:12:79:c9:81" }, { "ip": "172.16.62.213", "mac": "a4:ae:12:79:5a:27" } +// Node ID: 1112a4d4-ffff-4ece-b3f0-8d36e3d85001 + +var host_name = new Array() +var host_ip = new Array() +var host_mac = new Array() +var host_id = new Array() + +export function host_init(){ + for(let i=0;i<50;i++){ 
+ host_name.push(`node${i}`) + host_ip[i]=`172.16.62.${i+10}` + let mac_temp=genMAC() + host_mac.push(mac_temp) + let id_temp ="" + if(i<10){ + id_temp = `1112a4d4-ffff-4ece-b3f0-8d36e3d8500${i}` + } + else if(i<100){ + id_temp = `1112a4d4-ffff-4ece-b3f0-8d36e3d850${i}` + } + else{ + id_temp = `1112a4d4-ffff-4ece-b3f0-8d36e3d85${i}` + } + host_id.push(id_temp) + } + // console.log(JSON.stringify(host_name)) + // console.log(JSON.stringify(host_ip)) + // console.log(JSON.stringify(host_mac)) + // console.log(JSON.stringify(host_id)) +} + +export function create_nodes() { + console.log("Creating nodes"); + let url = "http://localhost:9007/nodes"; + + for (let i = 0; i < host_id.length; i++) { + let data = { + "host_info": { + "local_ip": host_ip[i], + "mac_address": host_mac[i], + "node_id": host_id[i], + "node_name": host_name[i], + "server_port": 8080, + "veth": "eth0" + } + } + sleep(3); + post_httprequest(url, data); + } + console.log("FINISH: Created nodes\n") +} + + +// Using subnet manager (9002) to create subnet +// Subnet1 name: subnet1 +// Subnet1 ID: 8182a4d4-ffff-4ece-b3f0-8d36e3d88001 +export function create_subnet1() { + console.log("Creating Subnet1\n") + + let url = "http://localhost:9002/project/3dda2801-d675-4688-a63f-dcda8d327f50/subnets" + + let data = { + "subnet": { + "cidr": "10.0.1.0/24", + "id": "8182a4d4-ffff-4ece-b3f0-8d36e3d88001", + "ip_version": 4, + "network_id": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001", + "name": "subnet1", + "host_routes": [ + { + "destination": "10.0.2.0/24", + "nexthop": "10.0.1.1" + } + ] + } + } + post_httprequest(url, data) + console.log("FINISH: Creating Subnet1\n") +} + + +// Subnet2 ID: 8182a4d4-ffff-4ece-b3f0-8d36e3d88002 +// export function create_subnet2() { +// console.log("Creating Subnet2\n") + +// let url = "http://localhost:9002/project/3dda2801-d675-4688-a63f-dcda8d327f50/subnets" + +// let data = { +// "subnet": { +// "cidr": "10.0.2.0/24", +// "id": "8182a4d4-ffff-4ece-b3f0-8d36e3d88002", +// 
"ip_version": 4, +// "network_id": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001", +// "name": "subnet2", +// "host_routes": [ +// { +// "destination": "10.0.1.0/24", +// "nexthop": "10.0.2.1" +// } +// ] +// } +// } +// post_httprequest(url, data) +// console.log("FINISH: Creating Subnet2\n") +// } + + + + +// Using route manager (port 9002) +// export function create_route() { +// console.log("Creating Router\n") + +// let url = "http://localhost:9003/project/3dda2801-d675-4688-a63f-dcda8d327f50/routers" + +// let data={ +// "router": { +// "admin_state_up": true, +// "availability_zone_hints": [ +// "string" +// ], +// "availability_zones": [ +// "string" +// ], +// "conntrack_helpers": [ +// "string" +// ], +// "description": "string", +// "distributed": true, +// "external_gateway_info": { +// "enable_snat": true, +// "external_fixed_ips": [ +// ], +// "network_id": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001" +// }, +// "flavor_id": "string", +// "gateway_ports": [ +// ], +// "ha": true, +// "id": "11112801-d675-4688-a63f-dcda8d327f50", +// "name": "router1", +// "owner": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001", +// "project_id": "3dda2801-d675-4688-a63f-dcda8d327f50", +// "revision_number": 0, +// "routetable": { +// }, +// "service_type_id": "string", +// "status": "BUILD", +// "tags": [ +// "string" +// ], +// "tenant_id": "3dda2801-d675-4688-a63f-dcda8d327f50" +// } +// } +// post_httprequest(url, data) +// console.log("FINISH: Creating Router\n") +// } + + +// export function attach_subnets_to_router() +// { +// console.log("Attach Subnets to Router\n") + +// let url = "http://10.213.43.161:9003/project/3dda2801-d675-4688-a63f-dcda8d327f50/routers/11112801-d675-4688-a63f-dcda8d327f50/add_router_interface"; +// let data = { +// "subnet_id": "8182a4d4-ffff-4ece-b3f0-8d36e3d88001" +// } +// put_httprequest(url, data) + +// sleep(3); + +// url = 
"http://10.213.43.161:9003/project/3dda2801-d675-4688-a63f-dcda8d327f50/routers/11112801-d675-4688-a63f-dcda8d327f50/add_router_interface"; + +// data = { +// "subnet_id": "8182a4d4-ffff-4ece-b3f0-8d36e3d88002" +// } +// put_httprequest(url, data) +// console.log("FINISH: Attach Subnets to Router\n") + +// } + + + + +// Security Group (port 9008) +let sg_id = ["3dda2801-d675-4688-a63f-dcda8d111111","3dda2801-d675-4688-a63f-dcda8d111112","3dda2801-d675-4688-a63f-dcda8d111113","3dda2801-d675-4688-a63f-dcda8d111114"] +let sg_name = ["sg1","sg2","sg3","sg4"] +export function create_security_group() +{ + console.log("Creating Security Group\n") + let url = "http://localhost:9008/project/3dda2801-d675-4688-a63f-dcda8d327f50/security-groups"; + for (let i = 0; i < sg_id.length; i++) { + let data= { + "security_group": { + "create_at": "string", + "description": "string", + "id": sg_id[i], + "name": sg_name[i], + "project_id": "3dda2801-d675-4688-a63f-dcda8d327f50", + "security_group_rules": [ + ], + "tenant_id": "3dda2801-d675-4688-a63f-dcda8d327f50", + "update_at": "string" + } + } + sleep(3); + post_httprequest(url, data) + console.log("FINISH: Creating Security Group\n") + } +} + +// Using Port manager, create the end port: +// Ensure your IPs are within the subnet you created (line 119) +// IP, name and ID should be unique for eqach request +// Ensure to use same security group ID 3dda2801-d675-4688-a63f-dcda8d111111 + + + + +// let subnet_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d88001","8182a4d4-ffff-4ece-b3f0-8d36e3d88002"] + + + +// let device_id = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001","8182a4d4-ffff-4ece-b3f0-8d36e3d00002","8182a4d4-ffff-4ece-b3f0-8d36e3d00003","8182a4d4-ffff-4ece-b3f0-8d36e3d00001","8182a4d4-ffff-4ece-b3f0-8d36e3d00002","8182a4d4-ffff-4ece-b3f0-8d36e3d00003"] + +// let device_id = 
["8182a4d4-ffff-4ece-b3f0-8d36e3d00001","8182a4d4-ffff-4ece-b3f0-8d36e3d00002","8182a4d4-ffff-4ece-b3f0-8d36e3d00003","8182a4d4-ffff-4ece-b3f0-8d36e3d00004","8182a4d4-ffff-4ece-b3f0-8d36e3d00005","8182a4d4-ffff-4ece-b3f0-8d36e3d00006"] +// let device_id = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001","8182a4d4-ffff-4ece-b3f0-8d36e3d00002"] + + +// let ip_addrs = ["10.0.1.101","10.0.1.102","10.0.1.103","10.0.2.201","10.0.2.202","10.0.2.203"] +// let ip_addrs = ["10.0.1.101","10.0.1.102","10.0.1.103","10.0.1.104","10.0.1.105","10.0.1.106"] +// let ip_addrs = ["10.0.1.301","10.0.1.302"] + + +// let port_id = ["7172a4d4-ffff-4ede-b3ml0-8d36e3d00101","7172a4d4-ffff-4ede-b3ml0-8d36e3d00102","7172a4d4-ffff-4ede-b3ml0-8d36e3d00103","7172a4d4-ffff-4ede-b3ml0-8d36e3d00201","7172a4d4-ffff-4ede-b3ml0-8d36e3d00202","7172a4d4-ffff-4ede-b3ml0-8d36e3d00203"] +// let port_id = ["7172a4d4-ffff-4ede-b3ml0-8d36e3d00301","7172a4d4-ffff-4ede-b3ml0-8d36e3d00302"] + + +// let port_name = ["port101","port102","port103","port201","port202","port203"] +// let port_name = ["port301","port302"] + +var port_name = new Array() +var port_id = new Array() +var ip_addrs = new Array() + +export function port_init(){ + for(let i=0;i<250;i++){ + port_name.push(`port${i}`) + ip_addrs[i]=`10.0.1.${i+2}` + let id_temp ="" + if(i<10){ + id_temp = `7172a4d4-ffff-4ede-b3ml0-8d36e3d00${i}` + } + else if(i<100){ + id_temp = `7172a4d4-ffff-4ede-b3ml0-8d36e3d0${i}` + } + else{ + id_temp = `7172a4d4-ffff-4ede-b3ml0-8d36e3d${i}` + } + port_id.push(id_temp) + } + // console.log(JSON.stringify(port_name)) + // console.log(JSON.stringify(port_id)) + // console.log(JSON.stringify(ip_addrs)) +} + + +export function create_port(){ + let url = "http://localhost:9006/project/3dda2801-d675-4688-a63f-dcda8d327f50/ports"; + for (let i = 0; i < port_id.length; i++) { + let j = i % host_id.length; + let k = i % sg_id.length; + let device_id_temp = uuid() + let data = { + "port": { + "admin_state_up" : true, + 
"allowed_address_pairs": [ + { + "ip_address": "11.11.11.1", + "mac_address": "00-AA-BB-15-EB-3F" + } + ], + "binding:host_id": host_name[j], + "binding:vif_details": {}, + "create_at": "string", + "description": "string", + "device_id": device_id_temp, + "device_owner": "compute:nova", + "dns_assignment": {}, + "dns_domain": "string", + "dns_name": "string", + "extra_dhcp_opts": [ + { + "ip_version": "string", + "opt_name": "string", + "opt_value": "string" + } + ], + "fast_path": true, + "fixed_ips": [ + { + "ip_address": ip_addrs[i], + "subnet_id": "8182a4d4-ffff-4ece-b3f0-8d36e3d88001" + } + ], + "id": port_id[i], + "mac_learning_enabled": true, + "name": port_name[i], + "network_id": "9192a4d4-ffff-4ece-b3f0-8d36e3d88001", + "network_ns": "string", + "port_security_enabled": true, + "project_id": "3dda2801-d675-4688-a63f-dcda8d327f50", + "qos_network_policy_id": "string", + "qos_policy_id": "string", + "revision_number": 0, + "security_groups": [ + sg_id[k] + ], + "tags": [ + "string" + ], + "tenant_id": "3dda2801-d675-4688-a63f-dcda8d327f50", + "update_at": "string", + "uplink_status_propagation": true, + "veth_name": "string" + } + } + sleep(3); + post_httprequest(url, data) + } + + console.log("FINISH: Create Port\n") +} + + + + + +// 比如在一个VPC中,有几百个虚机,每次给10分之一的虚机下发port,在同一子网。最后下发4个安全组,安全组就跨子网地随机选一些虚机。 + +export const options = { + scenarios: { + scenario: { + executor: 'shared-iterations', + maxDuration: '200m', + }, + }, +}; + +export default function () { + create_default_segment_table(); + create_vpc() + host_init() + create_nodes() + create_subnet1() + create_security_group() + port_init() + create_port() +} diff --git a/merak/testscripts/k6/prepare_payload.js b/merak/testscripts/k6/prepare_payload.js new file mode 100644 index 0000000..a900ad9 --- /dev/null +++ b/merak/testscripts/k6/prepare_payload.js @@ -0,0 +1,155 @@ +// MIT License +// Copyright(c) 2020 Futurewei Cloud +// Permission is hereby granted, free of charge, to any person obtaining a copy 
+// of this software and associated documentation files(the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import http from 'k6/http'; + +var ip_mac_db = {} +var K6_test_flag = false + +export function put_httprequest(url, data = {}) { + let headers = {} + let response = {} + if (K6_test_flag == true) { + headers = { 'Content-Type': 'application/json' }; + } + else { + headers = { + 'Content-Type': 'application/json', + 'Accept': '*/*', + } + } + console.log("PUTing http request") + console.log("url: " + url) + console.log("data: " + JSON.stringify(data)) + try { + response = http.put(url, JSON.stringify(data), { headers: headers }) + if (response.error_code == 0) { + console.log("PUT Success: " + url) + } + else { + console.log("PUT Fail, error_code: " + response.error_code + " , " + response.error) + } + return response.body + } + catch (err) { + console.log("PUT Failed for " + url + " with error_code: " + response.error_code + " , " + response.error) + return response.error + } +} + + +export function post_httprequest(url, data = {}) { + let headers = {} + if (K6_test_flag == true) { + 
headers = { + 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Mobile Safari/537.36' + } + } + else { + headers = { + 'Content-Type': 'application/json', + 'Accept': '*/*', + } + } + let response = {} + console.log("POSTing http request") + console.log("url: " + url) + console.log("data: " + JSON.stringify(data)) + + try { + // Using an object as body, the headers will automatically include + // 'Content-Type: application/x-www-form-urlencoded'. + response = http.post(url, JSON.stringify(data), { headers: headers }) + console.log(response.json()); + if (response.error_code == 0) { + console.log("POST Success: " + url) + if ("ports" in url) { + let valid_response = response.body.json() + console.log(`POST RESPONSE: ${JSON.stringify(valid_response)}`) + get_mac_for_ips(valid_response) + } + } + else { + console.log("POST Fail, error_code: " + response.error_code + " , " + response.error) + } + return response.body + } + catch (err) { + console.log("POST Failed for " + url + " with error_code: " + + response.error_code + " , " + response.error) + return response.error + } +} + + +export function get_mac_for_ips(valid_response) { + console.log("in prepare_payload: ", JSON.stringify(valid_response)) + let ports_info = valid_response["port"] + let key = ports_info["fixed_ips"][0]["ip_address"] + let value = ports_info["mac_address"] + ip_mac_db[key] = value + console.log("IP_MAC_DB = ", JSON.stringify(ip_mac_db)) +} + +export function get_httprequest(url) { + try { + let response = http.get(url) + if (response.error_code == 0) { + console.log("GET Success: " + url) + } + else { + console.log("GET Fail, error_code: " + response.error_code + " , " + response.error) + } + return response.body + } + catch (err) { + console.log("GET Failed for " + url + " with error_code: " + response.error_code + " , " + response.error) + return response.error + } + +} + + +export function get_mac_from_db() { + 
console.log("\n\n\n>>>>>>>") + console.log("IP & MAC stored in ignite db", JSON.stringify(ip_mac_db)) + return ip_mac_db +} + +// simple test +// export default function () { +// console.log("################ put test ################"); +// let url = 'https://httpbin.test.k6.io/put'; +// let data = { name: 'Bert' }; +// let res = put_httprequest(url, JSON.stringify(data)) +// console.log(JSON.stringify(res)); + +// console.log("################ post test ################"); +// url = 'https://fanyi.baidu.com/sug'; +// data = { 'kw': "hello" }; +// res = post_httprequest(url, JSON.stringify(data)); +// console.log(JSON.stringify(res)); + +// console.log("################ get test ################"); +// url = 'https://test.k6.io'; +// res = get_httprequest(url); +// console.log(JSON.stringify(res)); + +// console.log("################ get_mac_from_db ################"); +// get_mac_from_db() +// } +