diff --git a/.applatix/be-controller-wf.yaml b/.applatix/be-controller-wf.yaml new file mode 100644 index 0000000..bda08a2 --- /dev/null +++ b/.applatix/be-controller-wf.yaml @@ -0,0 +1,27 @@ + +--- +type: workflow +name: workflow +description: test-workflow + +fixtures: + - influxsrv: + template: influxsrv + +inputs: + parameters: + commit: + default: "%%session.commit%%" + repo: + default: "%%session.repo%%" + +steps: +- + checkout: + template: checkout +- + build: + template: test + parameters: + code: "%%steps.checkout.code%%" + influxHost: "%%fixtures.influxsrv.ip%%" \ No newline at end of file diff --git a/.applatix/checkout.yaml b/.applatix/checkout.yaml new file mode 100644 index 0000000..8585a69 --- /dev/null +++ b/.applatix/checkout.yaml @@ -0,0 +1,24 @@ +# checkout.yaml +--- +type: container +name: checkout +description: Checks out commit to /src and exports it as an artifact + +inputs: + parameters: + commit: + default: "%%session.commit%%" + repo: + default: "%%session.repo%%" + +outputs: + artifacts: + code: + path: /src + +container: + image: get.applatix.io/applatix/axscm:v2.0 + resources: + mem_mib: 256 + cpu_cores: 0.1 + command: axscm clone %%repo%% /src --commit %%commit%% diff --git a/.applatix/fixtures.yaml b/.applatix/fixtures.yaml new file mode 100644 index 0000000..85365d2 --- /dev/null +++ b/.applatix/fixtures.yaml @@ -0,0 +1,9 @@ +# fixtures.yaml +# Test Environment +--- +type: container +name: influxsrv +description: Influx DB instance +container: + image: hyperpilot/influx:1.2.2 + docker_options: -e ADMIN_USER=root -e INFLUXDB_INIT_PWD=root \ No newline at end of file diff --git a/.applatix/policy.yaml b/.applatix/policy.yaml new file mode 100644 index 0000000..eaebcd0 --- /dev/null +++ b/.applatix/policy.yaml @@ -0,0 +1,22 @@ +# policy.yaml +--- +type: policy +name: be-ctrl-workflow-policy +description: Trigger workflow on pushes, pull requests and as a cron job +template: workflow +notifications: + - + when: + - on_success + - 
on_failure + whom: + - committer + - author +when: + - + event: on_push + target_branches: + - "unit_test" +labels: + milestone: unit_test + version: 1.0.1 \ No newline at end of file diff --git a/.applatix/test.yaml b/.applatix/test.yaml new file mode 100644 index 0000000..f5fd54e --- /dev/null +++ b/.applatix/test.yaml @@ -0,0 +1,21 @@ +# test.yaml +--- +type: container +name: test +description: Build-and-test-the-code + +inputs: + artifacts: + - from: "%%code%%" + path: /src + parameters: + code: + influxHost: + +container: + resources: + mem_mib: 256 + cpu_cores: 1.0 + image: python:2 + command: sh -c 'cd /src && pip install -r requirements.txt && . ./run_tests.sh' + docker_options: -e "INFLUXDB_HOST=%%influxHost%%" diff --git a/.gitignore b/.gitignore index 7e99e36..947445a 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ -*.pyc \ No newline at end of file +*.pyc +.venv/ +.cache/ \ No newline at end of file diff --git a/blkioclass.py b/blkioclass.py index 15d7424..eecb03a 100644 --- a/blkioclass.py +++ b/blkioclass.py @@ -31,7 +31,7 @@ def __init__(self, block_dev, max_rd_iops, max_wr_iops): self.keys = set() # check if blockio is active - if not os.path.isdir('/sys/fs/cgroup/blkio/kubepods'): + if not os.path.isdir('/sys/fs/cgroup/blkio'): raise Exception('Blkio not configured for K8S') diff --git a/blkiocontrol.py b/blkiocontrol.py index 801a6ff..1a7131a 100644 --- a/blkiocontrol.py +++ b/blkiocontrol.py @@ -19,6 +19,21 @@ # hyperpilot imports import settings as st import blkioclass as blkioclass +import os + +def GetBlkioPath(base_path, pod, container_id): + """ + construct blkio path + Parameters: + pod: Pod instance + container_id: desire container + """ + if pod.qosclass == 'guaranteed': + root = os.path.join(base_path, 'pod{}'.format(pod.uid), container_id) + else: + root = os.path.join(base_path, pod.qosclass.lower(), "pod{}".format(pod.uid), container_id) + + return root def BlkioControll(): """ Blkio controller @@ -59,12 +74,13 @@ def 
BlkioControll(): active_be_ids = set() st.active.lock.acquire_read() for _, pod in st.active.pods.items(): - if pod.qosclass == 'guaranteed': - root = 'kubepods/' + 'pod' + pod.uid + '/' - else: - root = 'kubepods/' + pod.qosclass.lower() + '/pod' + pod.uid + '/' + # if pod.qosclass == 'guaranteed': + # root = netst['blkio_path'] + 'pod' + pod.uid + '/' + # else: + # root = netst['blkio_path'] + pod.qosclass.lower() + '/pod' + pod.uid + '/' for cont in pod.container_ids: - key = root + cont + # key = root + cont + key = GetBlkioPath(netst['blkio_path'], pod, cont) active_ids.add(key) if pod.wclass == 'BE': active_be_ids.add(key) diff --git a/config.json b/config.json index 537cdff..2ae87ff 100644 --- a/config.json +++ b/config.json @@ -1,6 +1,13 @@ { - "mode" : "k8s", - "ctlloc" : "in", + "mode": "k8s", + "ctlloc": "in", + "influx_db": { + "host": "influxsrv.hyperpilot", + "port": 8086, + "user": "root", + "password": "root", + "db": "be_controller" + }, "quota_controller": { "period": 5, "slack_threshold_disable": -0.5, @@ -33,7 +40,8 @@ "max_rd_iops": 1500, "hp_iops": 1000, "disabled": false, - "write_metrics": true + "write_metrics": true, + "blkio_path": "kubepods/" }, "write_metrics": true } diff --git a/controller.daemonset.yaml b/controller.daemonset.yaml index 0c56ffe..7fb8aec 100644 --- a/controller.daemonset.yaml +++ b/controller.daemonset.yaml @@ -40,4 +40,4 @@ spec: name: lib imagePullSecrets: - name: myregistrykey - terminationGracePeriodSeconds: 10 + terminationGracePeriodSeconds: 10 \ No newline at end of file diff --git a/maincontrol.py b/maincontrol.py index 2897534..b5f1c4c 100644 --- a/maincontrol.py +++ b/maincontrol.py @@ -371,7 +371,10 @@ def __init__(): # parse arguments st.params = ParseArgs() - st.stats_writer = store.InfluxWriter() + influx_conn = st.params['influx_db'] + st.stats_writer = store.InfluxWriter(influx_conn['host'], influx_conn['port'], + influx_conn['user'], influx_conn['password'], + influx_conn['db']) if
st.get_param("write_metrics", None, False) is True: # flatten the setting params @@ -532,4 +535,4 @@ def __init__(): cycle += 1 time.sleep(period) -__init__() +__init__() \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..95b4920 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,17 @@ +# Requirements automatically generated by pigar. +# https://github.com/Damnever/pigar + +# maincontrol.py: 28 +docker == 2.1.0 + +# store.py: 1,2 +influxdb == 4.0.0 + +# maincontrol.py: 29,30 +kubernetes == 2.0.0 + +# maincontrol.py: 27 +pycurl == 7.43.0 + +# urllib +urllib3>=1.19.1,!=1.21 \ No newline at end of file diff --git a/run_tests.sh b/run_tests.sh index 58bd33e..bf012bd 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -1 +1 @@ -python -m unittest netclass_tests +python -m unittest discover \ No newline at end of file diff --git a/settings.py b/settings.py index c0c73ae..af4fbc5 100644 --- a/settings.py +++ b/settings.py @@ -12,6 +12,7 @@ from kubernetes import watch import rwlock import store +import os class Container(object): """ A class for tracking active containers @@ -80,7 +81,7 @@ def add_pod(self, k8s_object, key): pod.namespace = k8s_object.metadata.namespace pod.uid = k8s_object.metadata.uid pod.ipaddress = k8s_object.status.pod_ip - pod.qosclass = k8s_object.status.qos_class.lower() + pod.qosclass = k8s_object.status.qos_class.lower() if k8s_object.status.qos_class != None else None pod.wclass = ExtractWClass(k8s_object) if pod.wclass == 'BE' and pod.qosclass != 'besteffort': print "K8SWatch:WARNING: Pod %s is not BestEffort in K8S" %(key) diff --git a/store.py b/store.py index 1eb3a54..40559fd 100644 --- a/store.py +++ b/store.py @@ -1,16 +1,23 @@ from influxdb import InfluxDBClient from influxdb.client import InfluxDBClientError +import json class InfluxWriter(object): - def __init__(self): - self.client = InfluxDBClient( - "influxsrv.hyperpilot", 8086, "root", "root", "be_controller") - try: - 
self.client.create_database("be_controller") - except InfluxDBClientError: - pass #Ignore + + def __init__(self, host=None, port=None, user=None, password=None, db=None): + if any(x is None for x in [host, port, user, password, db]): + self.client = None + else: + self.client = InfluxDBClient( + host, port, user, password, db) + try: + self.client.create_database(db) + except InfluxDBClientError: + pass #Ignore def write(self, time, hostname, controller, data): + if self.client is None: + raise Exception("store:ERROR: influxdb client not configured properly") try: self.client.write_points([{ "time": time, diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/kube_helper.py b/tests/kube_helper.py new file mode 100644 index 0000000..ef8b110 --- /dev/null +++ b/tests/kube_helper.py @@ -0,0 +1,142 @@ +# helper.py +from kubernetes import client, config, watch +import datetime +import time +import json +import uuid + +class Constants(object): + """ + Constant Class used for kube_helper internally + """ + # kubernetes pod status check interval + K8S_POD_STATUS_CHECK_INTERVAL = 2 + # kubernetes response code: duplicated namespace + K8S_RESP_CODE_DUPLICATE_NAMESPACE = 409 + # kubernetes response reason: already exist + K8S_RESP_REASON_DUPLICATE_NAMESPACE = 'AlreadyExists' + +class KubeHelper(object): + """ + Kubernetes helper (for test) + """ + def __init__(self): + self.podKey = "" + self.client = client + try: + # default check if this test is running inside a kube cluster + print('trying load incluster config') + config.load_incluster_config() + except Exception as e: + # load from Env + try: + print('trying load kube config file') + config.load_kube_config() + except Exception as e: + print "Cannot initialize K8S environment:", e + raise e + + def deleteDemoPods(self, namespace='kubernetes-plugin'): + v1 = client.CoreV1Api() + name = self.podName + body = client.V1DeleteOptions() + try: + 
v1.delete_namespaced_pod(name, namespace, body) + except Exception as e: + print 'delete pod error', e + + def create_namespace(self, ns_name): + v1 = client.CoreV1Api() + body = client.V1Namespace() + body.metadata = client.V1ObjectMeta(name=ns_name) + try: + v1.create_namespace(body) + except client.rest.ApiException as ae: + if ae.status == Constants.K8S_RESP_CODE_DUPLICATE_NAMESPACE and \ + json.loads(ae.body)['reason'] == Constants.K8S_RESP_REASON_DUPLICATE_NAMESPACE: + print "namespace {} already Exists".format(ns_name) + return + else: + raise Exception("create namespace error: ", ae) + + + def delete_namespace(self, namespace='kubernetes-plugin'): + v1 = client.CoreV1Api() + try: + v1.delete_namespace(namespace, client.V1DeleteOptions()) + except Exception as e: + pass + + def _generateTimeBaseRandomString(self): + return uuid.uuid4().hex + + def createDemoPod(self, namespace='kubernetes-plugin', BE=True): + """ + Create Demo Pod for unit test + """ + v1 = client.CoreV1Api() + pod = client.V1Pod() + + # check if namespace exists + + # if namespace not exists, create one + self.create_namespace(namespace) + + + label = {'hyperpilot.io/wclass' : 'HP'} + postfix = self._generateTimeBaseRandomString() + name = 'demo-hp-pod-' + postfix + if BE: + label = {'hyperpilot.io/wclass' : 'BE'} + name = 'demo-be-pod-' + postfix + + pod.metadata = client.V1ObjectMeta(name=name, labels=label) + # requirement = client.V1ResourceRequirements(requests={'cpu', '150m'}) + # pod.resources = requirement + containers = [] + for x in range(2): + container = client.V1Container() + container.image = "busybox" + container.args = ['sleep', '3600'] + container.name = 'busybox' + self._generateTimeBaseRandomString() + container.security_context = client.V1SecurityContext(privileged=True) + volumeMounts = [] + dockerSock = client.V1VolumeMount(mount_path='/var/run/docker.sock', name='docker-sock') + volumeMounts.append(dockerSock) + commandSock = 
client.V1VolumeMount(mount_path='/var/run/command.sock', name='command-sock') + volumeMounts.append(commandSock) + container.volume_mounts = volumeMounts + containers.append(container) + spec = client.V1PodSpec() + spec.containers = containers + volumes = [] + volumes.append(client.V1Volume(host_path=client.V1HostPathVolumeSource(path='/var/run/docker.sock'), name='docker-sock')) + volumes.append(client.V1Volume(host_path=client.V1HostPathVolumeSource(path='/var/run/command.sock'), name='command-sock')) + volumes.append(client.V1Volume(host_path=client.V1HostPathVolumeSource(path='/sbin'), name='sbin')) + volumes.append(client.V1Volume(host_path=client.V1HostPathVolumeSource(path='/lib'), name='lib')) + spec.volumes = volumes + spec.security_context = client.V1SecurityContext(privileged=True) + pod.spec = spec + response = v1.create_namespaced_pod(namespace, pod) + try: + pod = self.watchForStatus(namespace, name) + self.podKey = response.metadata.namespace + '/' + response.metadata.name + self.podName = response.metadata.name + return pod + except Exception as e: + print 'error response', e + print response + return "" + + def watchForStatus(self, namespace, podName): + v1 = client.CoreV1Api() + pod = None + while True: + time.sleep(Constants.K8S_POD_STATUS_CHECK_INTERVAL) + pods = v1.list_namespaced_pod(namespace) + find = filter(lambda x: x.metadata.name == podName, pods.items) + # print "get pod: ", find[0] + if find[0].status.phase == 'Running': + # print "watched pod:", find[0] + break + return find[0] diff --git a/tests/kubernetes-plugin.yaml b/tests/kubernetes-plugin.yaml new file mode 100644 index 0000000..6eba492 --- /dev/null +++ b/tests/kubernetes-plugin.yaml @@ -0,0 +1,126 @@ +# kubernetes-plugin.yaml +# Use for Jenkins CI +--- +apiVersion: "v1" +kind: "List" +items: + + - apiVersion: "v1" + kind: "Namespace" + metadata: + name: "kubernetes-plugin" + labels: + name: "kubernetes-plugin" + + # jenkins + + - apiVersion: "v1" + kind: "PersistentVolume" + 
metadata: + name: "jenkins" + namespace: "kubernetes-plugin" + spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 100Mi + hostPath: + path: "/data/kubernetes-plugin-jenkins" + + - apiVersion: "v1" + kind: "PersistentVolumeClaim" + metadata: + name: "jenkins" + namespace: "kubernetes-plugin" + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Mi + storageClassName: "" + + - apiVersion: "v1" + kind: "ReplicationController" + metadata: + name: "jenkins" + namespace: "kubernetes-plugin" + labels: + name: "jenkins" + spec: + replicas: 1 + template: + metadata: + name: "jenkins" + labels: + name: "jenkins" + spec: + containers: + - name: "jenkins" + image: "jenkins:2.46.2-alpine" + ports: + - containerPort: 8080 + - containerPort: 50000 + resources: + limits: + cpu: 1 + memory: 1Gi + requests: + cpu: 0.5 + memory: 500Mi + env: + - name: CPU_REQUEST + valueFrom: + resourceFieldRef: + resource: requests.cpu + - name: CPU_LIMIT + valueFrom: + resourceFieldRef: + resource: limits.cpu + - name: MEM_REQUEST + valueFrom: + resourceFieldRef: + resource: requests.memory + divisor: "1Mi" + - name: MEM_LIMIT + valueFrom: + resourceFieldRef: + resource: limits.memory + divisor: "1Mi" + - name: JAVA_OPTS + value: "-Xmx$(MEM_REQUEST)m -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85" + volumeMounts: + - name: "jenkins" + mountPath: "/var/jenkins_home" + livenessProbe: + httpGet: + path: /login + port: 8080 + initialDelaySeconds: 60 + timeoutSeconds: 5 + securityContext: + fsGroup: 1000 + volumes: + - name: "jenkins" + persistentVolumeClaim: + claimName: "jenkins" + + - apiVersion: "v1" + kind: "Service" + metadata: + name: "jenkins" + namespace: "kubernetes-plugin" + spec: + type: "NodePort" + selector: + name: "jenkins" + ports: + - + name: "http" + port: 80 + targetPort: 8080 + protocol: "TCP" + - + name: "slave" + port: 50000 + protocol: "TCP" \ No newline at end of file diff --git 
a/tests/test-serviceAccount.yaml b/tests/test-serviceAccount.yaml new file mode 100644 index 0000000..d93b314 --- /dev/null +++ b/tests/test-serviceAccount.yaml @@ -0,0 +1,23 @@ +# test-serviceAccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: ut-account + namespace: kubernetes-plugin + labels: + ut: be-controller +--- +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: unit-test + labels: + ut: be-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: +- kind: ServiceAccount + name: ut-account + namespace: kubernetes-plugin \ No newline at end of file diff --git a/tests/test_blkioclass.py b/tests/test_blkioclass.py new file mode 100644 index 0000000..90abd51 --- /dev/null +++ b/tests/test_blkioclass.py @@ -0,0 +1,79 @@ +import unittest +from blkioclass import BlkioClass +from blkiocontrol import GetBlkioPath +import json +import settings as st +import os +from kube_helper import KubeHelper +import time + +class BlkioClassTestCase(unittest.TestCase): + + def setUp(self): + fileDir = os.path.dirname(os.path.realpath('__file__')) + with open(os.path.join(fileDir, 'config.json'), 'r') as json_data_file: + st.params = json.load(json_data_file) + netst = st.params['blkio_controller'] + print "set max read iops: {}, max write iops: {}".format(netst['max_rd_iops'], netst['max_wr_iops']) + self.blkio = BlkioClass(netst['block_dev'], netst['max_rd_iops'], netst['max_wr_iops']) + st.enabled = True + + # create demo pod + self.kubehelper = KubeHelper() + self.demoPod = self.kubehelper.createDemoPod(BE=True) + + # self.cont_key = "kubepods/besteffort/pod{podId}/{contId}".format(podId=self.demoPod.metadata.uid, contId=self.demoPod.status.container_statuses[0].container_id.strip("docker://")) + pod = st.Pod() + pod.name = self.demoPod.metadata.name + pod.namespace = self.demoPod.metadata.namespace + pod.uid = self.demoPod.metadata.uid + pod.qosclass = 
self.demoPod.status.qos_class.lower() if self.demoPod.status.qos_class != None else None + self.cont_key = GetBlkioPath(netst['blkio_path'], pod, self.demoPod.status.container_statuses[0].container_id.strip("docker://")) + + + def tearDown(self): + self.kubehelper.deleteDemoPods() + + def generateContKey(self, v1pod, container_id): + path_template = "kubepods/besteffort/pod{podId}/{contId}" + return path_template.format(podId=v1pod.metadata.uid, contId=container_id) + + def test_blkio(self): + # add be cont + + self.blkio.addBeCont(self.cont_key) + self.assertTrue(self.cont_key in self.blkio.keys, msg='container key not add to keys') + + # double add same container id + self.assertRaises + with self.assertRaises(Exception): + self.blkio.addBeCont(self.cont_key) + + # test_setIopsLimit(self): + print "test setIopsLimit" + riops = st.params['blkio_controller']['max_rd_iops'] + wiops = st.params['blkio_controller']['max_wr_iops'] + + # set iops limit under limit + self.blkio.setIopsLimit(riops * 0.8, wiops * 0.8) + + iops = self.blkio.getIopUsed(self.cont_key) + self.assertLessEqual(iops[0], riops, msg='riops still greater then upper limit') + self.assertLessEqual(iops[1], wiops, msg='wiops still greater then upper limit') + + print "set iops limit over limit: riops: {}, wiops: {}".format(riops * 1.5, wiops * 1.5) + + with self.assertRaises(Exception): + self.blkio.setIopsLimit(riops * 1.5, wiops * 1.5) + + print "test clearIopsLimit" + self.blkio.clearIopsLimit() + _ = self.blkio.getIopUsed(self.cont_key) + print "after clear iops limit: {}".format(_) + self.assertEqual(0, _[0], msg="riops not reset, got value: {}".format(_[0])) + self.assertEqual(0, _[1], msg="wiops not reset, got value: {}".format(_[1])) + + # remove be cont + self.blkio.removeBeCont(self.cont_key) + self.assertFalse(self.cont_key in self.blkio.keys, msg='container key still remain in keys') + diff --git a/tests/test_command_client.py b/tests/test_command_client.py new file mode 100644 index 
0000000..70db6ee --- /dev/null +++ b/tests/test_command_client.py @@ -0,0 +1,48 @@ +import unittest +from command_client import * + +class CommandClientTestCase(unittest.TestCase): + + def runningInDocker(self): + try: + with open('/proc/self/cgroup', 'r') as procfile: + for line in procfile: + fields = line.strip().split('/') + if fields[1] == 'docker': + return True + + return False + except Exception as e: + return False + + + def setUp(self): + if self.runningInDocker(): + self.ctloc = "in" + else: + self.ctloc = "out" + + def test_run_command(self): + + cmd = CommandClient(self.ctloc) + if self.ctloc == "in": + self.assertTrue(type(cmd.client) is UnixSocketClient, msg=None) + else: + self.assertTrue(type(cmd.client) is SubprocessClient, msg=None) + + result = cmd.run_command('echo hello world!!!') + self.assertEqual(('hello world!!!\n', None), result, msg="Not expected result") + + def test_run_commands(self): + commands = [ + 'echo hello', + 'echo world', + 'echo command', + 'echo client' + ] + cmd = CommandClient(self.ctloc) + result = cmd.run_commands(commands) + self.assertTrue(result, msg="fail run commands") + +if __name__ == '__main__': + unittest.main() diff --git a/netclass_tests.py b/tests/test_netclass.py similarity index 96% rename from netclass_tests.py rename to tests/test_netclass.py index d1f454c..e8afab9 100644 --- a/netclass_tests.py +++ b/tests/test_netclass.py @@ -1,7 +1,7 @@ import unittest import netclass as nc -class TestNetclassMethods(unittest.TestCase): +class NetclassTestCase(unittest.TestCase): def test_parse_bw_stats(self): s = """ class htb 1:10 root prio 0 rate 664Mbit ceil 664Mbit burst 1494b cburst 1494b @@ -49,4 +49,4 @@ class htb 1:1 root prio 0 rate 10Gbit ceil 10Gbit burst 0b cburst 0b self.assertEqual(nc.NetClass.parseBwStats(s), {10: 123000000 / 1000000.0, 1: 2395000 / 1000000.0}) if __name__ == '__main__': - unittest.main() + unittest.main() diff --git a/tests/test_settings.py b/tests/test_settings.py new file mode 
100644 index 0000000..e40442a --- /dev/null +++ b/tests/test_settings.py @@ -0,0 +1,99 @@ +# test_settings.py +import unittest +import settings as st +from kube_helper import KubeHelper +import time +import docker +import os + + +class ActivePodsTestCase(unittest.TestCase): + + def setUp(self): + self.helperBE = KubeHelper() + self.helperHP = KubeHelper() + self.demoBEPod = self.helperBE.createDemoPod(BE=True) + self.demoHPPod = self.helperHP.createDemoPod(BE=False) + self.podBEkey = self.helperBE.podKey + self.podHPkey = self.helperHP.podKey + self.configSetting() + + def tearDown(self): + self.helperBE.deleteDemoPods() + self.helperHP.deleteDemoPods() + + def configSetting(self): + helper = KubeHelper() + st.node.keuv = helper.client.CoreV1Api() + st.node.denv = docker.from_env() + st.enabled = True + + def test_crud_pod(self): + + # test add pod + oriBEPodCnt = st.active.be_pods + st.active.add_pod(self.demoBEPod, self.podBEkey) + self.assertTrue(len(list(filter(lambda x: st.active.pods[x].name == self.demoBEPod.metadata.name, st.active.pods))) > 0, msg="BE Pod not added to tracking list") + self.assertTrue((st.active.be_pods - oriBEPodCnt == 1), msg="be_pods not update") + + oriHPPodCnt = st.active.hp_pods + st.active.add_pod(self.demoHPPod, self.podHPkey) + self.assertTrue(len(list(filter(lambda x: st.active.pods[x].name == self.demoHPPod.metadata.name, st.active.pods))) > 0, msg="HP Pod not added to tracking list") + self.assertTrue((st.active.hp_pods - oriHPPodCnt == 1), msg="hp_pods number not update") + + # test modify pod + minQuota = 50000 + cont_id = '' + cont = None + try: + cont_id = self.demoBEPod.status.container_statuses[0].container_id.strip('docker://') + cont = st.node.denv.containers.get(cont_id) + + except Exception as e: + cont_id = self.demoBEPod.status.container_statuses[1].container_id.strip('docker://') + cont = st.node.denv.containers.get(cont_id) + + beforeCpuQuota = cont.attrs['HostConfig']['CpuQuota'] + beforeCpuPeriod = 
cont.attrs['HostConfig']['CpuPeriod'] + + st.active.modify_pod(self.demoBEPod, self.podBEkey, minQuota) + + time.sleep(10) + cont = st.node.denv.containers.get(cont_id) + + afterCpuQuota = cont.attrs['HostConfig']['CpuQuota'] + afterCpuPeriod = cont.attrs['HostConfig']['CpuPeriod'] + + self.assertEqual(minQuota, afterCpuQuota, msg="Not apply correctly") + self.assertEqual(100000, afterCpuPeriod, msg="Cpu Period is not apply") + + + st.active.modify_pod(self.demoHPPod, self.podHPkey, minQuota) + try: + cont_id = self.demoHPPod.status.container_statuses[0].container_id.strip('docker://') + cont = st.node.denv.containers.get(cont_id) + except Exception as e: + cont_id = self.demoHPPod.status.container_statuses[1].container_id.strip('docker://') + cont = st.node.denv.containers.get(cont_id) + + afterCpuQuota = cont.attrs['HostConfig']['CpuQuota'] + afterCpuPeriod = cont.attrs['HostConfig']['CpuPeriod'] + + self.assertNotEqual(minQuota, afterCpuQuota, msg="should not apply cpu quota") + self.assertNotEqual(100000, afterCpuPeriod, msg="should not apply cpu period") + + # st.active.modify_pod(self.demoPod, self.podkey) + + # def test delete pod + st.active.delete_pod(self.podBEkey) + self.assertEqual(len(list(filter(lambda x: st.active.pods[x].name == self.demoBEPod.metadata.name, st.active.pods))), 0, msg="BE pod not stop tracking") + st.active.delete_pod(self.podHPkey) + self.assertEqual(len(list(filter(lambda x: st.active.pods[x].name == self.demoHPPod.metadata.name, st.active.pods))), 0, msg="HP pod not stop tracking") + + # def test_ExtractWClass(self): + self.assertEqual(st.ExtractWClass(self.demoBEPod), 'BE', msg="expect extract BE, but it's not") + self.assertEqual(st.ExtractWClass(self.demoHPPod), 'HP', msg="expect extract HP, but it's not") + + # def test_K8SWatch(self): + # print "Hmm...i donno know how to test this function..., let's just pass this for now" + diff --git a/tests/test_store.py b/tests/test_store.py new file mode 100644 index 0000000..e14a481 --- 
/dev/null +++ b/tests/test_store.py @@ -0,0 +1,29 @@ +# test_store.py +import unittest +from store import InfluxWriter +import influxdb +import docker +import datetime +import os + + +class StoreTestCase(unittest.TestCase): + + def test_write(self): + try: + result = self.influxWriter.write(datetime.datetime.now(), "test", "huh", {"field1": "value1", "field2": "value2", "field3": "value3"}) + # test if data is the same as write + except Exception as e: + self.assertTrue(False, msg="writting error with %s" % e) + + + def setUp(self): + try: + influxHost = os.getenv('INFLUXDB_HOST', default='localhost') + influxPort = os.getenv('INFLUXDB_PORT', default=8086) + influxUser = os.getenv('INFLUXDB_USER', default='root') + password = os.getenv('INFLUXDB_PASSWORD', default='root') + db = os.getenv('INFLUXDB_DB', default='be_controller') + self.influxWriter = InfluxWriter(host=influxHost, port=influxPort, user=influxUser, password=password, db=db) + except Exception as e: + raise AssertionError("Couldn't connect to InfluxDB, be sure InfluxDB is running and reachable.") diff --git a/ut-images/Dockerfile b/ut-images/Dockerfile new file mode 100644 index 0000000..2faaa3a --- /dev/null +++ b/ut-images/Dockerfile @@ -0,0 +1,7 @@ +# Dockerfile +FROM hyperpilot/influx:1.2.2 +RUN apt-get update +# RUN apt-get install -y build-essential autoconf libtool pkg-config python-opengl python-imaging python-pyrex python-pyside.qtopengl idle-python2.7 qt4-dev-tools qt4-designer libqtgui4 libqtcore4 libqt4-xml libqt4-test libqt4-script libqt4-network libqt4-dbus python-qt4 python-qt4-gl libgle3 python-dev libssl-dev +RUN apt-get install -y python-dev build-essential libssl-dev libffi-dev libxml2-dev libxslt1-dev zlib1g-dev python-pip libpq-dev libldap2-dev libsasl2-dev libgnutls28-dev libcurl4-gnutls-dev +RUN pip install pytest +RUN rm -rf /var/lib/apt/lists/* \ No newline at end of file