Skip to content
This repository was archived by the owner on Jan 7, 2023. It is now read-only.
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 6 additions & 4 deletions conf/common.py
Original file line number Diff line number Diff line change
Expand Up @@ -579,11 +579,11 @@ def return_os_id(user, nodes):

def add_to_hosts( nodes ):
    """Ensure every hostname -> ip mapping in *nodes* exists in /etc/hosts.

    nodes: dict mapping hostname -> ip address string.

    For each pair: if the ip's line already names the host, nothing is done;
    if a line for the ip exists without the host, the hostname is appended to
    that line; otherwise a fresh "ip hostname" line is appended to the file.
    """
    # NOTE: the scraped diff contained both the old and the new grep/sed
    # lines; this is the post-change version (word-boundary matching) only.
    for node, ip in nodes.items():
        # \< \> word boundaries so e.g. 10.0.0.1 does not also match 10.0.0.10
        res = bash("grep '\<%s\>' /etc/hosts" % str(ip)).strip()
        if node in res:
            # this ip's line already carries the hostname
            continue
        if res != "":
            # the ip has a line but not this hostname: append it in place
            bash("sed -i 's/\<%s\>/%s %s/g' /etc/hosts" % (res, res, node))
        else:
            # no entry for this ip yet: add a new line
            bash("echo %s %s >> /etc/hosts" % (str(ip), node))

Expand Down Expand Up @@ -611,6 +611,8 @@ def eval_args( obj, function_name, args ):

def wait_ceph_to_health( user, controller ):
    """Wait for the ceph cluster to report healthy — currently a no-op stub.

    The original polling implementation (retry check_health(user, controller)
    for up to 300 s, then sys.exit() on failure) was disabled and left behind
    as commented-out code; that dead code is removed here.  Always returns
    None without contacting the cluster.

    user: ssh user for the cluster nodes (unused while stubbed).
    controller: controller/monitor node to query (unused while stubbed).
    """
    # NOTE(review): the health gate is intentionally disabled; re-enable by
    # polling check_health(user, controller) if deployments need to block
    # until the cluster is healthy.
    pass
def check_health( user, controller ):
check_count = 0
stdout, stderr = pdsh(user, [controller], 'ceph health', option="check_return")
Expand All @@ -643,7 +645,7 @@ def get_ceph_health(user, node):
if len(res):
stdout = res[node]
output["ceph_status"] = stdout['health']['overall_status']
output["detail"] = stdout['health']['checks']
output["detail"] = stdout['health']['timechecks']
if "write_bytes_sec" in stdout['pgmap']:
str_wb = str(stdout['pgmap']['write_bytes_sec'] / 1024 / 1024) + ' MB/s wr, '
str_rop = '0 op/s rd, ' if stdout['pgmap']['read_op_per_sec'] == 0 else str(stdout['pgmap']['read_op_per_sec'] / 1024) + ' kop/s rd, '
Expand Down
51 changes: 46 additions & 5 deletions deploy/mod/deploy_rgw.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ def gen_cephconf(self, option="refresh", ceph_disk=False):
with open("../conf/ceph.conf", 'a+') as f:
f.write("".join(rgw_conf))

'''
def rgw_dependency_install(self):
user = self.cluster["user"]
rgw_nodes = self.cluster["rgw"]
Expand All @@ -146,6 +147,46 @@ def rgw_install(self):
install_method = "yum -y install"
rados_pkg = "ceph-radosgw"
common.pdsh( user, [node], "%s radosgw radosgw-agent --force-yes" % install_method,"console")
'''

def rgw_dependency_install(self):
    """Install radosgw's haproxy dependency on every rgw node.

    If haproxy is already present (detected via dpkg) it is purged first so
    the install is clean, then haproxy is (re)installed using the package
    manager matching each node's detected OS.
    """
    common.printout("LOG","Check if radosgw dependencies: haproxy installed")
    user = self.cluster["user"]
    rgw_nodes = self.cluster["rgw"]
    res = common.pdsh(user, rgw_nodes, "dpkg -l | grep haproxy", option = "check_return")
    if res and res[0]:
        common.printout("WARNING","Try to reinstall haproxy",log_level="LVL1")
        common.pdsh(user, rgw_nodes, "dpkg -P haproxy")
    common.printout("LOG","Reinstall radosgw dependencies: haproxy ")
    os_type_list = common.return_os_id( user, rgw_nodes )
    for node, os_type in os_type_list.items():
        if "Ubuntu" in os_type:
            install_method = "apt-get -y install"
        elif "CentOS" in os_type:
            install_method = "yum -y install"
        else:
            # Fix: previously install_method either raised NameError on the
            # first node or silently reused the previous node's value when
            # the OS was neither Ubuntu nor CentOS. Skip unknown platforms
            # explicitly instead.
            common.printout("WARNING","Unsupported OS '%s' on %s, skipping haproxy install" % (os_type, node),log_level="LVL1")
            continue
        common.pdsh( user, [node], "%s haproxy" % ( install_method ),"console")


def rgw_install(self):
    """Install the radosgw packages on every rgw node.

    Purges any existing radosgw / radosgw-agent packages first (detected via
    dpkg), pushes the binaries via self.install_binary(), then installs the
    per-OS radosgw package set on each node.
    """
    # Fix: the log message said "haproxy installed" — a copy/paste from
    # rgw_dependency_install; this step checks for radosgw.
    common.printout("LOG","Check if radosgw installed")
    user = self.cluster["user"]
    rgw_nodes = self.cluster["rgw"]
    res = common.pdsh(user, rgw_nodes, "dpkg -l | grep radosgw", option = "check_return")
    if res and res[0]:
        common.printout("WARNING","Try to reinstall radosgw, radosgw-agent",log_level="LVL1")
        common.pdsh(user, rgw_nodes, "dpkg -P radosgw")
        common.pdsh(user, rgw_nodes, "dpkg -P radosgw-agent")
    common.printout("LOG","Reinstall radosgw: radosgw, radosgw-agent")
    self.install_binary()
    os_type_list = common.return_os_id( user, rgw_nodes )
    for node, os_type in os_type_list.items():
        if "Ubuntu" in os_type:
            install_method = "apt-get -y install"
            rados_pkg = "radosgw"
            extra_opts = "--force-yes"  # apt-only flag; not valid for yum
        elif "CentOS" in os_type:
            install_method = "yum -y install"
            rados_pkg = "ceph-radosgw"
            extra_opts = ""
        else:
            # Fix: install_method/rados_pkg leaked across iterations (or
            # raised NameError) for unsupported OSes; skip them explicitly.
            common.printout("WARNING","Unsupported OS '%s' on %s, skipping radosgw install" % (os_type, node),log_level="LVL1")
            continue
        # Fix: rados_pkg was computed per OS but never used — the command
        # always installed the Ubuntu package names and always passed apt's
        # --force-yes, which yum rejects. Use the per-OS values.
        common.pdsh( user, [node], "%s %s radosgw-agent %s" % (install_method, rados_pkg, extra_opts),"console")

def rgw_deploy(self, rgw_nodes = None):
user = self.cluster["user"]
Expand All @@ -155,27 +196,27 @@ def rgw_deploy(self, rgw_nodes = None):

rgw_node_index = len(self.cluster["rgw"]) - len(rgw_nodes)
rgw_index = rgw_node_index * rgw_ins_per_nodes + 1

common.printout("LOG","deploy radosgw instances")
common.pdsh( user, rgw_nodes, 'sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.radosgw.keyring', 'check_return')
common.pdsh( user, rgw_nodes, 'sudo chmod +r /etc/ceph/ceph.client.radosgw.keyring', 'check_return')

rgw_ins = {}
total_rgw_ins = len(rgw_nodes) * rgw_ins_per_nodes
while ( total_rgw_ins - rgw_index + 1) > 0:
common.printout("LOG","deploy radosgw instances for %s-%s" % (self.cluster['rgw'][rgw_node_index], rgw_index))
host_name_id = self.cluster['rgw'][rgw_node_index]+"-"+str(rgw_index)
# ceph auth for all radosgw instances
common.pdsh( user, [rgw_nodes[0]], 'ceph auth del client.radosgw.%s' %( host_name_id ), 'check_return')
common.pdsh( user, [rgw_nodes[0]], 'ceph auth del client.radosgw.%s' %( host_name_id ), 'check_return')
common.pdsh( user, [rgw_nodes[0]], 'sudo ceph-authtool /etc/ceph/ceph.client.radosgw.keyring -n client.radosgw.%s --gen-key' %(host_name_id), 'check_return')
common.pdsh( user, [rgw_nodes[0]], "sudo ceph-authtool -n client.radosgw.%s --cap osd 'allow rwx' --cap mon 'allow rwx' /etc/ceph/ceph.client.radosgw.keyring" %(host_name_id), 'check_return')
common.pdsh( user, [rgw_nodes[0]], 'sudo ceph -k /etc/ceph/ceph.client.admin.keyring auth add client.radosgw.%s -i /etc/ceph/ceph.client.radosgw.keyring' %(host_name_id), 'check_return')

rgw_ins[host_name_id] = self.cluster["rgw_ip_bond"][self.cluster['rgw'][rgw_node_index]]
if rgw_index % rgw_ins_per_nodes == 0:
rgw_node_index += 1
rgw_index += 1

self.distribute_hosts(rgw_ins)

common.printout("LOG","Completed deploy radosgw instances")
self.distribute_hosts(rgw_ins)

if len(self.cluster['rgw']) == 1:
return
Expand Down