Skip to content
This repository was archived by the owner on Mar 31, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
593 changes: 593 additions & 0 deletions merak/testscripts/TC/alcor_http_api_test.java

Large diffs are not rendered by default.

53 changes: 53 additions & 0 deletions merak/testscripts/TC/application.properties
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
# MIT License
# Copyright(c) 2020 Futurewei Cloud
#
# Permission is hereby granted,
# free of charge, to any person obtaining a copy of this software and associated documentation files(the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and / or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

### Whether to test NCM, or to test Alcor HTTP APIs ###
test_against_ncm = false

### Test Controller NCM test Params ###
number_of_vpcs = 10
ports_node_one = 100
ports_node_two = 200
node_one_ip = 127.0.0.1
node_two_ip = 127.0.0.2
ncm_ip = 127.0.0.3
ncm_port = 321
user_name = ubuntu
password = gfedcba
### 0 is concurrent, others are sequential ###
ping_mode = 1
### 1 to do background ping, otherwise NOT to do background ping ###
background_ping = 1
### 0 to create container and ping, otherwise NOT to create container and NOT to ping ###
create_container_and_ping = 1

### Test Controller Alcor HTTP APIs Test Params ###
vpm_ip = 127.0.0.1
vpm_port = 8080
snm_ip = 127.0.0.1
snm_port = 8081
pm_ip = 127.0.0.1
pm_port = 8081
vpc_cidr_slash = 8
tenant_amount = 1
project_amount_per_tenant = 1
vpc_amount_per_project = 1
subnet_amount_per_vpc = 1
test_vpc_api = true
test_subnet_api = true
test_port_api = true
call_api_rate = 100

#####Spring health#####
management.health.redis.enabled=false
179 changes: 179 additions & 0 deletions merak/testscripts/jinkins/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,179 @@
# MIT License
```
Copyright(c) 2020 Futurewei Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
```

# Busybox Ping Test

# In this README:
- [MIT License](#mit-license)
- [Busybox Ping Test](#busybox-ping-test)
- [In this README:](#in-this-readme)
- [Introduction](#introduction)
- [Key Features of Test Script](#key-features-of-test-script)
- [Directory Structure and Files](#directory-structure-and-files)
- [Test Configuration](#test-configuration)
- [Example](#example)
- [Preconditions](#preconditions)
- [Running Test](#running-test)
- [After Test Starts](#after-test-starts)
- [ACA on target hosts](#aca-on-target-hosts)
- [Troubleshooting](#troubleshooting)
- [Quick Start](#quick-start)

## Introduction
This is an end-to-end test where two Busybox containers, hosted on the same or different hosts, ping each other. The connectivity between the two Busybox containers is provided by Alcor services and the Alcor Control Agent.

This is a test document based on the K6 script rewritten by busybox Ping test

## Key Features of Test Script
- Builds Alcor and docker images for all alcor services (optional).
- Stops and removes existing alcor services and starts the alcor services afresh.
- Stops any currently running ACA on target computers where Busybox containers are to be deployed.
- Clears any existing busybox containers on the target hosts.
- Checks if the Alcor Control Agents are running on the target hosts.
- Prepares the testcase using provided test payload in the configuration file and generate the goal state.
- Deploys two busybox containers on the target hosts and assigns the IP/MAC obtained from the goal state generated in previous step.
- Runs a ping command from one container to another.

## Directory Structure and Files
alcor/scripts/test-automation
1. alcor_services.ini
2. ping_test.py
3. helper_functions.py
4. container_ops.py
5. prepare_payload.py
6. create_test_setup.py
7. create_test_cases.py

## Test Configuration
Test configuration should be presented in the file alcor_services.ini. Configuration file has following sections:
1. [services]: Carries the list of alcor service folders, service names and the ports they use.
2. [AlcorControlAgents]: New line separated list of hosts over which ACA is running and on which Busybox containers will be deployed.
3. [test_info]: Carries the data necessary for creating the end goal states.
4. [vpc_info], [node_info], [subnet_info], [security_groups], [port_info]: These carry the test payload that is needed to generate the end goal state.

## Example
You can configure the alcor services name, port number in the following way:
[services]
1. ignite = {"name":"ignite", "port":10800, "path":"/lib/ignite.Dockerfile"}
2. vpc_manager = {"name":"vpm", "port":9001}
3. subnet_manager = {"name":"snm", "port":9002}

With the above configuration the ignite service will be run with the name 'ignite',
The vpc_manager is built from the Dockerfile located in the services/vpc_manager folder, and a container named 'vpm' listening on port 9001 is started.

## Preconditions
Ensure that your target hosts, over which you plan to deploy the Busybox containers
1. Have Alcor Control Agent binaries located at /home/ubuntu/repos/aca/build/bin/AlcorControlAgent

## Running Test
The main file for running the test is ping_test.py. It is a python script that can be run from command prompt in either of the following two ways:
```
python3 busybox_ping_test.py
./busybox_ping_test.py
```

You can optionally provide the parameter "-b build" to build all the docker images of the alcor services. This step need not be repeated for subsequent tests, unless any changes are made in Alcor.

## After Test Starts
1. It will stop, remove existing Alcor services (if present) and start them all (as listed in alcor_services.ini file)
2. Checks the target hosts if any Alcor Control Agent (ACA) is running. If yes, it is killed and ACA restarted.
3. Checks whether the ACAs are running on the targets. If found not running, the test stops.
4. Using the test info and payload provided in config file, generate the end goal states for two end nodes.
5. Deploy two busy box containers con1 and con2 on the target hosts and runs a ping command from one container to another.

## ACA on target hosts
1. The following packages are required to build and run ACA. Install them on the target hosts. Though not shown below, installing these packages will require sudo permissions.
```
openvswitch-switch
openvswitch-common
apache-pulsar-client
apache-pulsar-client-dev
```

2. The library 'openvswitch' is also required. This library can only be installed from source. Get a clone of this library from github and checkout 2.12 branch.
```
https://github.com/openvswitch/ovs.git
```
Install the following packages before building ovs
```
make
autoconf
libtool
c-ares
```
Now go to the ovs source, update the file 'configure.ac', and edit the line carrying LT_INIT to enable shared library creation before building:
* LT_INIT (enable_shared)
```
./configure --prefix=/usr --localstatedir=/var --sysconfdir=/etc
make
make install
```

3. After the successful installation of ovs, start the following services:
```
sudo systemctl restart openvswitch-switch
sudo /usr/local/share/openvswitch/scripts/ovs-ctl start
```
The script ovs-ctl starts the services ovsdb-server and ovs-vswitchd.

4. If ACA or ovs services throw bridge related errors, clear the existing bridges for any given container on target hosts. The test script takes care of these itself. However, if you ever manually try to start ACA, following commands can be used to clear existing bridges.
```
ovs-vsctl del-br br-tun
ovs-vsctl del-br br-int
ovs-docker del-ports br-int <container_name>
```

5. Following commands can be used to diagnose the target node's ovs services and bridges:
```
ovs-vsctl show
ovs-ofctl dump-flows br-tun
ovs-ofctl dump-flows br-int
```

## Troubleshooting
1) While the test script is running, the user account 'ubuntu' on the Alcor host will be making ssh connections to the target hosts. Ensure that user 'ubuntu' has passwordless ssh access to the target hosts. Copy the contents of the id_rsa.pub file of user 'ubuntu' (located at ~/.ssh) and paste it into the file ~/.ssh/authorized_keys on each target host.

2) Running the tests from a terminal on the Alcor host often leaves stdout and stdin in an unknown state. You can fix this by running
```
reset
```
While typing reset command you will not be able to see it. But once run, the terminal is restored.

3) While running the tests from Jenkins, it is essential that the jenkins user also has password less access to the target hosts. Easiest way to ensure that to copy the entire ~/.ssh folder of user 'ubuntu' on to the jenkins home directory, which is usually at /var/lib/jenkins. Ensure while copying that file attributes are preserved.
```
cp -pr /home/ubuntu/.ssh /var/lib/jenkins
chown -R jenkins:jenkins /var/lib/jenkins/.ssh
```
Go through the jenkins help file available in the alcor-int repository to get additional details on running tests through jenkins.

4) If the tests ever fail due to errors from Alcor API calls, observe the output of the http get requests from these calls. Check the configuration in alcor_services.ini and redeploy by manually calling these APIs.


## Quick Start
After making the necessary configuration file changes, run the script with the following parameters to get started:
1. ./ping_test.py -b build
- build the alcor services and their docker images and
- runs the simple test case of two containers under same subnet and security group pinging each other.
2. ./ping_test.py -t 1
   - runs the test case of two busybox containers on two subnets and same security group
3. ./ping_test.py -t 2
- runs the test case of two busybox containers on one subnet and two security groups



106 changes: 106 additions & 0 deletions merak/testscripts/jinkins/alcor_services.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,106 @@
[services]
ignite = {"name":"ignite", "port":10800, "path":"/lib/ignite.Dockerfile"}
# vpc_manager internal port : 9001
vpc_manager = {"name":"vpm", "port":9009}

# Segment handling can't be routed through API gateway but has to go
# directly to vpc_manager but other vpc related requests will have to
# go through API gateway. A segment_service is added as an alias of
# vpc_manager internal port for handling this situation. At present, ping
# test uses it to create default segment table.
segment_service = {"name" : "sgs", "port" : 9001}

# subnet_manager internal port : 9002
subnet_manager = {"name":"snm", "port":9009}

# route_manager internal port : 9003
route_manager = {"name":"rm", "port":9009}

# private_ip_manager internal port : 9004
private_ip_manager = {"name":"pim", "port":9009}

# mac_manager (virtual mac manager) internal port : 9005
mac_manager = {"name":"mm", "port":9009}

# port_manager internal port : 9006
port_manager = {"name":"pm", "port":9009}

# This can't be routed through API GW
node_manager = {"name":"nm", "port":9007}

# security_group_manager internal port : 9008
security_group_manager = {"name":"sgm", "port":9009}

api_gateway = {"name":"ag", "port":9009}

# data_plane_manager internal port : 9010
data_plane_manager = {"name":"dpm", "port":9009}

# elastic_ip_manager internal port : 9011
elastic_ip_manager = {"name":"eim", "port":9009}

# quota_manager internal port : 9012
quota_manager = {"name":"qm", "port":9009}

# network_acl_manager internal port : 9013
network_acl_manager = {"name":"nam", "port":9009}

# network_config_manager internal port : 9014
network_config_manager = {"name":"ncm", "port":9009}

# gateway_manager internal port : 9015
gateway_manager = {"name":"gm", "port":9009}

[AlcorControlAgents]
node1 = 172.31.19.133
node2 = 172.31.21.202

[test_setup]
vpc_id = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001"
project_id = "3dda2801-d675-4688-a63f-dcda8d327f50"
tenant_id = "3dda2801-d675-4688-a63f-dcda8d327f50"
network_id = "9192a4d4-ffff-4ece-b3f0-8d36e3d88001"
cidr = "172.16.0.0/16"
node_id = ["1112a4d4-ffff-4ece-b3f0-8d36e3d85001", "1112a4d4-ffff-4ece-b3f0-8d36e3d85002"]
node_name = ["node1", "node2"]
subnet_id = "8182a4d4-ffff-4ece-b3f0-8d36e3d88001"
security_group_id = "3dda2801-d675-4688-a63f-dcda8d111111"
device_id = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"]
port_name = ["port101", "port102"]
port_id = ["7122a4d4-ffff-5eee-b3f0-8d36e3d01101", "7122a4d4-ffff-5eee-b3f0-8d36e3d02201"]
ip_addrs = ["172.16.1.101", "172.16.1.102"]
container_names = ["con1", "con2"]

[L3_AttachRouter_then_CreatePorts]
subnet_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d88001", "8182a4d4-ffff-4ece-b3f0-8d36e3d88002"]
cidrs = ["172.16.1.0/24", "172.16.2.0/24"]
ip_addrs = ["172.16.1.101", "172.16.2.201"]
subnet_names = ["subnet1", "subnet2"]
device_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"]

[L2_basic]
security_group_ids = ["3dda2801-d675-4688-a63f-dcda8d111111", "3dda2801-d675-4688-a63f-dcda8d111112"]
sg_names = ["sg1","sg2"]
device_ids = ["8182a4d4-ffff-4ece-b3f0-8d36e3d00001", "8182a4d4-ffff-4ece-b3f0-8d36e3d00002"]

[gateways]
gateway_info = [{"gw" : "172.16.1.1" , "ips" : ["172.16.1.101", "172.16.1.102"]}, {"gw" : "172.16.2.1", "ips" : ["172.16.2.201"]}]

[vpc_info]
vpc_info = {"cidr":${test_setup:cidr},"id":${test_setup:vpc_id}, "project_id":${test_setup:project_id}}

[node_info]
node_info = {"node_id":${test_setup:node_id}, "node_name":${test_setup:node_name}, "server_port":8080, "veth":"eth0"}

[subnet_info]
subnet_info = {"cidr":${test_setup:cidr},"id":${test_setup:subnet_id},"ip_version":4,"network_id":${test_setup:network_id},"name":"subnet1","host_routes":[{"destination":"172.16.1.0/24","nexthop":"172.16.1.1"}]}

[security_groups]
security_group_info = {"create_at":"string","description":"string","id":${test_setup:security_group_id},"name":"sg1","project_id":${test_setup:project_id},"security_group_rules":[],"tenant_id":${test_setup:tenant_id},"update_at":"string"}

[port_info]
port_info = {"binding:host_id":${test_setup:node_name},"device_id":${test_setup:device_id},"fixed_ips":${test_setup:ip_addrs},"subnet_id":${test_setup:subnet_id},"id": ${test_setup:port_id},"name": ${test_setup:port_name},"network_id": ${test_setup:network_id},"project_id":${test_setup:project_id},"security_groups":${test_setup:security_group_id},"tenant_id":${test_setup:tenant_id}}

[router_info]
router_info = {"name":"router1","owner":${test_setup:vpc_id},"network_id":${test_setup:network_id},"project_id":${test_setup:project_id},"security_groups":${test_setup:security_group_id},"tenant_id":${test_setup:tenant_id},"id":"11112801-d675-4688-a63f-dcda8d327f50"}

Loading