diff --git a/extras/geo-rep/schedule_georep.py b/extras/geo-rep/schedule_georep.py
index 80eba9f..238a498 100644
--- a/extras/geo-rep/schedule_georep.py
+++ b/extras/geo-rep/schedule_georep.py
@@ -169,14 +169,14 @@ def get_bricks(volname):
     return value
 
 
-def get_georep_status(mastervol, slave):
+def get_georep_status(mainvol, subordinate):
     session_keys = set()
     out = {}
     cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication"]
-    if mastervol is not None:
-        cmd += [mastervol]
-    if slave:
-        cmd += [slave]
+    if mainvol is not None:
+        cmd += [mainvol]
+    if subordinate:
+        cmd += [subordinate]
     cmd += ["status", "--xml"]
 
     info = execute(cmd)
@@ -186,30 +186,30 @@ def get_georep_status(mastervol, slave):
     # Get All Sessions
     for volume_el in tree.findall("geoRep/volume"):
         sessions_el = volume_el.find("sessions")
-        # Master Volume name if multiple Volumes
+        # Main Volume name if multiple Volumes
         mvol = volume_el.find("name").text
 
         # For each session, collect the details
         for session in sessions_el.findall("session"):
-            session_slave = "{0}:{1}".format(mvol, session.find(
-                "session_slave").text)
-            session_keys.add(session_slave)
-            out[session_slave] = {}
+            session_subordinate = "{0}:{1}".format(mvol, session.find(
+                "session_subordinate").text)
+            session_keys.add(session_subordinate)
+            out[session_subordinate] = {}
 
             for pair in session.findall('pair'):
-                master_brick = "{0}:{1}".format(
-                    pair.find("master_node").text,
-                    pair.find("master_brick").text
+                main_brick = "{0}:{1}".format(
+                    pair.find("main_node").text,
+                    pair.find("main_brick").text
                 )
 
-                out[session_slave][master_brick] = {
-                    "mastervol": mvol,
-                    "slavevol": pair.find("slave").text.split("::")[-1],
-                    "master_node": pair.find("master_node").text,
-                    "master_brick": pair.find("master_brick").text,
-                    "slave_user": pair.find("slave_user").text,
-                    "slave": pair.find("slave").text,
-                    "slave_node": pair.find("slave_node").text,
+                out[session_subordinate][main_brick] = {
+                    "mainvol": mvol,
+                    "subordinatevol": pair.find("subordinate").text.split("::")[-1],
+                    "main_node": pair.find("main_node").text,
+                    "main_brick": pair.find("main_brick").text,
+                    "subordinate_user": pair.find("subordinate_user").text,
+                    "subordinate": pair.find("subordinate").text,
+                    "subordinate_node": pair.find("subordinate_node").text,
                     "status": pair.find("status").text,
                     "crawl_status": pair.find("crawl_status").text,
                     "entry": pair.find("entry").text,
@@ -218,7 +218,7 @@ def get_georep_status(mastervol, slave):
                     "failures": pair.find("failures").text,
                     "checkpoint_completed": pair.find(
                         "checkpoint_completed").text,
-                    "master_node_uuid": pair.find("master_node_uuid").text,
+                    "main_node_uuid": pair.find("main_node_uuid").text,
                     "last_synced": pair.find("last_synced").text,
                     "checkpoint_time": pair.find("checkpoint_time").text,
                     "checkpoint_completion_time":
@@ -230,21 +230,21 @@ def get_georep_status(mastervol, slave):
     return session_keys, out
 
 
-def get_offline_status(volname, brick, node_uuid, slave):
+def get_offline_status(volname, brick, node_uuid, subordinate):
     node, brick = brick.split(":")
-    if "@" not in slave:
-        slave_user = "root"
+    if "@" not in subordinate:
+        subordinate_user = "root"
     else:
-        slave_user, _ = slave.split("@")
+        subordinate_user, _ = subordinate.split("@")
 
     return {
-        "mastervol": volname,
-        "slavevol": slave.split("::")[-1],
-        "master_node": node,
-        "master_brick": brick,
-        "slave_user": slave_user,
-        "slave": slave,
-        "slave_node": "N/A",
+        "mainvol": volname,
+        "subordinatevol": subordinate.split("::")[-1],
+        "main_node": node,
+        "main_brick": brick,
"subordinate_user": subordinate_user, + "subordinate": subordinate, + "subordinate_node": "N/A", "status": "Offline", "crawl_status": "N/A", "entry": "N/A", @@ -252,44 +252,44 @@ def get_offline_status(volname, brick, node_uuid, slave): "meta": "N/A", "failures": "N/A", "checkpoint_completed": "N/A", - "master_node_uuid": node_uuid, + "main_node_uuid": node_uuid, "last_synced": "N/A", "checkpoint_time": "N/A", "checkpoint_completion_time": "N/A" } -def get(mastervol=None, slave=None): +def get(mainvol=None, subordinate=None): """ - This function gets list of Bricks of Master Volume and collects + This function gets list of Bricks of Main Volume and collects respective Geo-rep status. Output will be always ordered as the - bricks list in Master Volume. If Geo-rep status is not available + bricks list in Main Volume. If Geo-rep status is not available for any brick then it updates OFFLINE status. """ out = [] - session_keys, gstatus = get_georep_status(mastervol, slave) + session_keys, gstatus = get_georep_status(mainvol, subordinate) for session in session_keys: - mvol, _, slave = session.split(":", 2) - slave = slave.replace("ssh://", "") - master_bricks = get_bricks(mvol) + mvol, _, subordinate = session.split(":", 2) + subordinate = subordinate.replace("ssh://", "") + main_bricks = get_bricks(mvol) out.append([]) - for brick in master_bricks: + for brick in main_bricks: bname = brick["name"] if gstatus.get(session) and gstatus[session].get(bname, None): out[-1].append(gstatus[session][bname]) else: out[-1].append( - get_offline_status(mvol, bname, brick["hostUuid"], slave)) + get_offline_status(mvol, bname, brick["hostUuid"], subordinate)) return out -def get_summary(mastervol, slave_url): +def get_summary(mainvol, subordinate_url): """ Wrapper function around Geo-rep Status and Gluster Volume Info This combines the output from Bricks list and Geo-rep Status. - If a Master Brick node is down or Status is faulty then increments + If a Main Brick node is down or Status is faulty then increments the faulty counter. It also collects the checkpoint status from all workers and compares with Number of Bricks. """ @@ -297,7 +297,7 @@ def get_summary(mastervol, slave_url): faulty_rows = [] out = [] - status_data = get(mastervol, slave_url) + status_data = get(mainvol, subordinate_url) for session in status_data: session_name = "" @@ -324,17 +324,17 @@ def get_summary(mastervol, slave_url): summary["completed_checkpoints"] += 1 session_name = "{0}=>{1}".format( - row["mastervol"], - row["slave"].replace("ssh://", "") + row["mainvol"], + row["subordinate"].replace("ssh://", "") ) if row["status"] == "Faulty": - faulty_rows.append("{0}:{1}".format(row["master_node"], - row["master_brick"])) + faulty_rows.append("{0}:{1}".format(row["main_node"], + row["main_brick"])) if row["status"] == "Offline": - down_rows.append("{0}:{1}".format(row["master_node"], - row["master_brick"])) + down_rows.append("{0}:{1}".format(row["main_node"], + row["main_brick"])) if summary["active"] == summary["completed_checkpoints"] and \ summary["faulty"] == 0 and summary["offline"] == 0: @@ -349,12 +349,12 @@ def get_summary(mastervol, slave_url): return out -def touch_mount_root(mastervol): +def touch_mount_root(mainvol): # Create a Mount and Touch the Mount point root, # Hack to make sure some event available after # setting Checkpoint. Without this their is a chance of # Checkpoint never completes. 
-    with glustermount("localhost", mastervol) as mnt:
+    with glustermount("localhost", mainvol) as mnt:
         execute(["touch", mnt])
 
 
@@ -362,21 +362,21 @@ def main(args):
     turns = 1
 
     # Stop Force
-    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mastervol,
-           "%s::%s" % (args.slave, args.slavevol), "stop", "force"]
+    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mainvol,
+           "%s::%s" % (args.subordinate, args.subordinatevol), "stop", "force"]
     execute(cmd)
     output_ok("Stopped Geo-replication")
 
     # Set Checkpoint to NOW
-    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mastervol,
-           "%s::%s" % (args.slave, args.slavevol), "config", "checkpoint",
+    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mainvol,
+           "%s::%s" % (args.subordinate, args.subordinatevol), "config", "checkpoint",
            "now"]
     execute(cmd)
     output_ok("Set Checkpoint")
 
     # Start the Geo-replication
-    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mastervol,
-           "%s::%s" % (args.slave, args.slavevol), "start"]
+    cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication", args.mainvol,
+           "%s::%s" % (args.subordinate, args.subordinatevol), "start"]
     execute(cmd)
     output_ok("Started Geo-replication and watching Status for "
               "Checkpoint completion")
@@ -387,16 +387,16 @@ def main(args):
 
     # Sleep till Geo-rep initializes
     time.sleep(60)
 
-    touch_mount_root(args.mastervol)
+    touch_mount_root(args.mainvol)
 
-    slave_url = "{0}::{1}".format(args.slave, args.slavevol)
+    subordinate_url = "{0}::{1}".format(args.subordinate, args.subordinatevol)
 
     # Loop to Check the Geo-replication Status and Checkpoint
     # If All Status OK and all Checkpoints complete,
     # Stop the Geo-replication and Log the Completeness
     while True:
-        session_summary = get_summary(args.mastervol,
-                                      slave_url)
+        session_summary = get_summary(args.mainvol,
+                                      subordinate_url)
         if len(session_summary) == 0:
             # If Status command fails with another transaction error
             # or any other error. Gluster cmd still produces XML output
@@ -425,8 +425,8 @@ def main(args):
             if summary["checkpoints_ok"]:
                 output_ok("Stopping Geo-replication session now")
                 cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication",
-                       args.mastervol,
-                       "%s::%s" % (args.slave, args.slavevol), "stop"]
+                       args.mainvol,
+                       "%s::%s" % (args.subordinate, args.subordinatevol), "stop"]
                 execute(cmd)
                 break
             else:
@@ -434,15 +434,15 @@ def main(args):
                 # was down and came online now. SETATTR on mount is not
                 # recorded, So again issue touch on mount root So that
                 # Stime will increase and Checkpoint will complete.
-                touch_mount_root(args.mastervol)
+                touch_mount_root(args.mainvol)
 
         # Increment the turns and Sleep for 10 sec
         turns += 1
         duration = int(time.time()) - start_time
         if args.timeout > 0 and duration > (args.timeout * 60):
             cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication",
-                   args.mastervol,
-                   "%s::%s" % (args.slave, args.slavevol), "stop", "force"]
+                   args.mainvol,
+                   "%s::%s" % (args.subordinate, args.subordinatevol), "stop", "force"]
             execute(cmd)
             output_notok("Timed out, Stopping Geo-replication("
                          "Duration: {0}sec)".format(duration))
@@ -457,12 +457,12 @@ def main(args):
 if __name__ == "__main__":
     parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                             description=__doc__)
-    parser.add_argument("mastervol", help="Master Volume Name")
-    parser.add_argument("slave",
+    parser.add_argument("mainvol", help="Main Volume Name")
+    parser.add_argument("subordinate",
                         help="SLAVEHOST or root@SLAVEHOST "
                         "or user@SLAVEHOST", metavar="SLAVE")
-    parser.add_argument("slavevol", help="Slave Volume Name")
+    parser.add_argument("subordinatevol", help="Subordinate Volume Name")
     parser.add_argument("--interval", help="Interval in Seconds. "
                         "Wait time before each status check",
                         type=int, default=10)
@@ -478,7 +478,7 @@ def main(args):
     try:
         # Check for session existence
        cmd = ["/usr/local/sbin/gluster", "volume", "geo-replication",
-               args.mastervol, "%s::%s" % (args.slave, args.slavevol), "status"]
+               args.mainvol, "%s::%s" % (args.subordinate, args.subordinatevol), "status"]
         execute(cmd)
         main(args)
     except KeyboardInterrupt:
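
Editor's aside: the scheduler above drives everything off `gluster volume geo-replication ... status --xml`. Below is a minimal, self-contained sketch of the XML walk the renamed get_georep_status() performs. The sample document and its values are invented for illustration, and the element names simply mirror the renamed code; the gluster CLI must emit matching tags for this parsing to succeed.

    # Standalone sketch of the status-XML traversal, not part of the patch.
    import xml.etree.ElementTree as ET

    SAMPLE = """
    <cliOutput>
      <geoRep>
        <volume>
          <name>gv1</name>
          <sessions>
            <session>
              <session_subordinate>suuid:ssh://f241::gv2</session_subordinate>
              <pair>
                <main_node>node1</main_node>
                <main_brick>/bricks/b1</main_brick>
                <subordinate>ssh://f241::gv2</subordinate>
                <status>Active</status>
              </pair>
            </session>
          </sessions>
        </volume>
      </geoRep>
    </cliOutput>
    """

    tree = ET.fromstring(SAMPLE)
    for volume_el in tree.findall("geoRep/volume"):
        mvol = volume_el.find("name").text
        for session in volume_el.find("sessions").findall("session"):
            # Session key: "<mainvol>:<session_subordinate>"
            key = "{0}:{1}".format(mvol, session.find("session_subordinate").text)
            for pair in session.findall("pair"):
                brick = "{0}:{1}".format(pair.find("main_node").text,
                                         pair.find("main_brick").text)
                print(key, brick, pair.find("status").text)
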
diff --git a/extras/git-branch-diff.py b/extras/git-branch-diff.py
index 382513e..62888ae 100755
--- a/extras/git-branch-diff.py
+++ b/extras/git-branch-diff.py
@@ -167,7 +167,7 @@ def parse_cmd_args (self):
         " command line parser"
         author = subprocess.check_output('git config user.email',
                                          shell = True).rstrip('\n')
-        source = "remotes/origin/master"
+        source = "remotes/origin/main"
         options = [' --pretty=format:"%h %s" ']
         path = subprocess.check_output('git rev-parse --show-toplevel',
                                        shell = True).rstrip('\n')
diff --git a/geo-replication/src/peer_mountbroker.py b/geo-replication/src/peer_mountbroker.py
index 10eddc6..e09ee5a 100644
--- a/geo-replication/src/peer_mountbroker.py
+++ b/geo-replication/src/peer_mountbroker.py
@@ -10,7 +10,7 @@
                     runcli, oknotok)
 from prettytable import PrettyTable
 
-LOG_DIR = "/var/log/glusterfs/geo-replication-slaves"
+LOG_DIR = "/var/log/glusterfs/geo-replication-subordinates"
 GEOREP_DIR = "/var/lib/glusterd/geo-replication"
 GLUSTERD_VOLFILE = "/usr/local/etc/glusterfs/glusterd.vol"
 
@@ -142,9 +142,9 @@ def info(self):
 class NodeSetup(Cmd):
     # Test if group exists using `getent group <group>`
     # and then group add using `groupadd <group>`
-    # chgrp -R /var/log/glusterfs/geo-replication-slaves
+    # chgrp -R /var/log/glusterfs/geo-replication-subordinates
     # chgrp -R /var/lib/glusterd/geo-replication
-    # chmod -R 770 /var/log/glusterfs/geo-replication-slaves
+    # chmod -R 770 /var/log/glusterfs/geo-replication-subordinates
     # chmod -R 770 /var/lib/glusterd/geo-replication
     # mkdir -p <mountbroker-root>
     # chmod 0711 <mountbroker-root>
@@ -237,7 +237,7 @@ def run(self, args):
 class NodeStatus(Cmd):
     # Check if Group exists
     # Check if user exists
-    # Check directory permission /var/log/glusterfs/geo-replication-slaves
+    # Check directory permission /var/log/glusterfs/geo-replication-subordinates
     # and /var/lib/glusterd/geo-replication
     # Check mount root and its permissions
     # Check glusterd.vol file for user, group, dir existance
diff --git a/geo-replication/syncdaemon/argsupgrade.py b/geo-replication/syncdaemon/argsupgrade.py
index 632271d..9129534 100644
--- a/geo-replication/syncdaemon/argsupgrade.py
+++ b/geo-replication/syncdaemon/argsupgrade.py
@@ -30,7 +30,7 @@ def gethostbyname(hnam):
                           (hnam, ex.strerror))
 
 
-def slave_url(urldata):
+def subordinate_url(urldata):
     urldata = urldata.replace("ssh://", "")
     host, vol = urldata.split("::")
     vol = vol.split(":")[0]
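
subordinate_url() above normalizes a "[user@]host::vol[:suffix]" URL before it is reused in the rewritten command lines below. A runnable sketch of that normalization follows; the return format is an assumption on my part, since the function's tail falls outside the hunk context shown above.

    # Sketch only: mirrors the visible body of subordinate_url(); the
    # final return line is assumed, not quoted from the patch.
    def subordinate_url(urldata):
        urldata = urldata.replace("ssh://", "")   # strip the scheme, if any
        host, vol = urldata.split("::")           # "[user@]host" and "vol[:suffix]"
        vol = vol.split(":")[0]                   # drop a trailing ":<uuid>" suffix
        return "%s::%s" % (host, vol)

    print(subordinate_url("ssh://root@f241::gv2:uuid123"))  # -> root@f241::gv2
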
@@ -51,15 +51,15 @@ def init_gsyncd_template_conf():
     os.close(fd)
 
 
-def init_gsyncd_session_conf(master, slave):
-    slave = slave_url(slave)
-    master = master.strip(":")
-    slavehost, slavevol = slave.split("::")
-    slavehost = slavehost.split("@")[-1]
+def init_gsyncd_session_conf(main, subordinate):
+    subordinate = subordinate_url(subordinate)
+    main = main.strip(":")
+    subordinatehost, subordinatevol = subordinate.split("::")
+    subordinatehost = subordinatehost.split("@")[-1]
 
     # Session Config File
     path = "%s/geo-replication/%s_%s_%s/gsyncd.conf" % (
-        GLUSTERD_WORKDIR, master, slavehost, slavevol)
+        GLUSTERD_WORKDIR, main, subordinatehost, subordinatevol)
 
     if os.path.exists(os.path.dirname(path)) and not os.path.exists(path):
         fd = os.open(path, os.O_CREAT | os.O_RDWR)
@@ -91,8 +91,8 @@ def upgrade():
         # --glusterd-uuid=f26ac7a8-eb1b-4ea7-959c-80b27d3e43d0
         # f241::gv2
         p = ArgumentParser()
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("--glusterd-uuid")
         p.add_argument("-c")
         p.add_argument("--iprefix")
@@ -100,12 +100,12 @@
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
         # Overwrite the sys.argv after rearrange
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
         sys.argv = [
             sys.argv[0],
             "monitor",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             "--local-node-id",
             pargs.glusterd_uuid
         ]
@@ -113,20 +113,20 @@
         # -c gsyncd.conf --iprefix=/var :gv1 f241::gv2
         # --status-get --path /bricks/b1
         p = ArgumentParser()
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("-c")
         p.add_argument("--path")
         p.add_argument("--iprefix")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         sys.argv = [
             sys.argv[0],
             "status",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             "--local-path",
             pargs.path
         ]
@@ -157,38 +157,38 @@ def upgrade():
         p = ArgumentParser()
         p.add_argument("--normalize-url")
         pargs = p.parse_known_args(sys.argv[1:])[0]
-        print(("ssh://%s" % slave_url(pargs.normalize_url)))
+        print(("ssh://%s" % subordinate_url(pargs.normalize_url)))
         sys.exit(0)
     elif "--config-get-all" in sys.argv:
         # -c gsyncd.conf --iprefix=/var :gv1 f241::gv2 --config-get-all
         p = ArgumentParser()
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("-c")
         p.add_argument("--iprefix")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         sys.argv = [
             sys.argv[0],
             "config-get",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             "--show-defaults",
             "--use-underscore"
         ]
     elif "--verify" in sys.argv and "spawning" in sys.argv:
         # Just checks that able to spawn gsyncd or not
         sys.exit(0)
-    elif "--slavevoluuid-get" in sys.argv:
-        # --slavevoluuid-get f241::gv2
+    elif "--subordinatevoluuid-get" in sys.argv:
+        # --subordinatevoluuid-get f241::gv2
         p = ArgumentParser()
-        p.add_argument("--slavevoluuid-get")
+        p.add_argument("--subordinatevoluuid-get")
         p.add_argument("-c")
         p.add_argument("--iprefix")
 
         pargs = p.parse_known_args(sys.argv[1:])[0]
-        host, vol = pargs.slavevoluuid_get.split("::")
+        host, vol = pargs.subordinatevoluuid_get.split("::")
 
         # Modified sys.argv
         sys.argv = [
@@ -219,40 +219,40 @@ def upgrade():
         # --iprefix=/var :gv1 f241::gv2
         p = ArgumentParser()
         p.add_argument("--create")
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("-c")
         p.add_argument("--iprefix")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         # Modified sys.argv
         sys.argv = [
             sys.argv[0],
             "monitor-status",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             pargs.create
         ]
     elif "--config-get" in sys.argv:
         # -c gsyncd.conf --iprefix=/var :gv1 f241::gv2 --config-get pid-file
         p = ArgumentParser()
         p.add_argument("--config-get")
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("-c")
         p.add_argument("--iprefix")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         # Modified sys.argv
         sys.argv = [
             sys.argv[0],
             "config-get",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             "--only-value",
             "--show-defaults",
             "--name",
@@ -266,20 +266,20 @@ def upgrade():
         # --path=/bricks/b1 -c gsyncd.conf :gv1 f241::gv2
         # --config-set log_level DEBUG
         p = ArgumentParser()
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("--config-set", nargs=2)
         p.add_argument("-c")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         # Modified sys.argv
         sys.argv = [
             sys.argv[0],
             "config-set",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             pargs.config_set[0],
             pargs.config_set[1]
         ]
@@ -300,20 +300,20 @@ def upgrade():
         # -c gsyncd.conf --iprefix=/var :gv1 f241::gv2 --config-del log_level
         p = ArgumentParser()
         p.add_argument("--config-del")
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("-c")
         p.add_argument("--iprefix")
         pargs = p.parse_known_args(sys.argv[1:])[0]
 
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         # Modified sys.argv
         sys.argv = [
             sys.argv[0],
             "config-reset",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave),
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate),
             pargs.config_del.replace("_", "-")
         ]
     elif "--delete" in sys.argv:
@@ -322,13 +322,13 @@ def upgrade():
         p = ArgumentParser()
         p.add_argument("--reset-sync-time", action="store_true")
         p.add_argument("--path-list")
-        p.add_argument("master")
-        p.add_argument("slave")
+        p.add_argument("main")
+        p.add_argument("subordinate")
         p.add_argument("--iprefix")
         p.add_argument("-c")
         pargs = p.parse_known_args(sys.argv[1:])[0]
-        init_gsyncd_session_conf(pargs.master, pargs.slave)
+        init_gsyncd_session_conf(pargs.main, pargs.subordinate)
 
         paths = pargs.path_list.split("--path=")
         paths = ["--path=%s" % x.strip() for x in paths if x.strip() != ""]
@@ -337,8 +337,8 @@ def upgrade():
         sys.argv = [
             sys.argv[0],
             "delete",
-            pargs.master.strip(":"),
-            slave_url(pargs.slave)
+            pargs.main.strip(":"),
+            subordinate_url(pargs.subordinate)
         ]
 
         sys.argv += paths
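
Every upgrade() branch above follows the same pattern: parse only the known legacy flags with parse_known_args(), then rebuild sys.argv in the new subcommand form. A compact, hedged illustration of that shim; the legacy command line and the helper name are examples of mine, not the patch's code:

    # Sketch of the argv-rewrite pattern used throughout argsupgrade.py.
    from argparse import ArgumentParser

    def upgrade_config_get(argv):
        # e.g. legacy: gsyncd.py -c gsyncd.conf :gv1 f241::gv2 --config-get pid-file
        p = ArgumentParser()
        p.add_argument("main")
        p.add_argument("subordinate")
        p.add_argument("--config-get")
        p.add_argument("-c")
        pargs = p.parse_known_args(argv[1:])[0]   # unknown flags are ignored
        # new form: gsyncd.py config-get <main> <subordinate> --name <name>
        return [argv[0], "config-get",
                pargs.main.strip(":"), pargs.subordinate,
                "--name", pargs.config_get]

    print(upgrade_config_get(
        ["gsyncd.py", "-c", "gsyncd.conf", ":gv1", "f241::gv2",
         "--config-get", "pid-file"]))
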
p.add_argument("--subordinate-id", help="Subordinate Volume ID") p.add_argument("--rpc-fd", help="Read and Write fds for worker-agent communication") p.add_argument("-c", "--config-file", help="Config File") p.add_argument("--debug", action="store_true") - # Slave - p = sp.add_parser("slave") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave details user@host::vol format") + # Subordinate + p = sp.add_parser("subordinate") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate details user@host::vol format") p.add_argument("--session-owner") - p.add_argument("--master-brick", - help="Master brick which is connected to the Slave") - p.add_argument("--master-node", - help="Master node which is connected to the Slave") - p.add_argument("--master-node-id", - help="Master node ID which is connected to the Slave") - p.add_argument("--local-node", help="Local Slave node") - p.add_argument("--local-node-id", help="Local Slave ID") + p.add_argument("--main-brick", + help="Main brick which is connected to the Subordinate") + p.add_argument("--main-node", + help="Main node which is connected to the Subordinate") + p.add_argument("--main-node-id", + help="Main node ID which is connected to the Subordinate") + p.add_argument("--local-node", help="Local Subordinate node") + p.add_argument("--local-node-id", help="Local Subordinate ID") p.add_argument("-c", "--config-file", help="Config File") p.add_argument("--debug", action="store_true") - # All configurations which are configured via "slave-" options + # All configurations which are configured via "subordinate-" options # DO NOT add default values for these configurations, default values # will be picked from template config file - p.add_argument("--slave-timeout", type=int, - help="Timeout to end gsyncd at Slave side") + p.add_argument("--subordinate-timeout", type=int, + help="Timeout to end gsyncd at Subordinate side") p.add_argument("--use-rsync-xattrs", action="store_true") - p.add_argument("--slave-log-level", help="Slave Gsyncd Log level") - p.add_argument("--slave-gluster-log-level", - help="Slave Gluster mount Log level") - p.add_argument("--slave-gluster-command-dir", - help="Directory where Gluster binaries exist on slave") - p.add_argument("--slave-access-mount", action="store_true", - help="Do not lazy umount the slave volume") + p.add_argument("--subordinate-log-level", help="Subordinate Gsyncd Log level") + p.add_argument("--subordinate-gluster-log-level", + help="Subordinate Gluster mount Log level") + p.add_argument("--subordinate-gluster-command-dir", + help="Directory where Gluster binaries exist on subordinate") + p.add_argument("--subordinate-access-mount", action="store_true", + help="Do not lazy umount the subordinate volume") # Status p = sp.add_parser("status") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate") p.add_argument("-c", "--config-file", help="Config File") p.add_argument("--local-path", help="Local Brick Path") p.add_argument("--debug", action="store_true") @@ -151,8 +151,8 @@ def main(): # Config-get p = sp.add_parser("config-get") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate") p.add_argument("--name", help="Config Name") 
p.add_argument("-c", "--config-file", help="Config File") p.add_argument("--debug", action="store_true") @@ -163,8 +163,8 @@ def main(): # Config-set p = sp.add_parser("config-set") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate") p.add_argument("name", help="Config Name") p.add_argument("value", help="Config Value") p.add_argument("-c", "--config-file", help="Config File") @@ -172,8 +172,8 @@ def main(): # Config-reset p = sp.add_parser("config-reset") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate") p.add_argument("name", help="Config Name") p.add_argument("-c", "--config-file", help="Config File") p.add_argument("--debug", action="store_true") @@ -186,8 +186,8 @@ def main(): # Delete p = sp.add_parser("delete") - p.add_argument("master", help="Master Volume Name") - p.add_argument("slave", help="Slave") + p.add_argument("main", help="Main Volume Name") + p.add_argument("subordinate", help="Subordinate") p.add_argument("-c", "--config-file", help="Config File") p.add_argument('--path', dest='paths', action="append") p.add_argument("--reset-sync-time", action="store_true", @@ -201,25 +201,25 @@ def main(): # variables, use this for adding extra variables extra_tmpl_args = {} - # Add First/Primary Slave host, user and volume - if getattr(args, "slave", None) is not None: - hostdata, slavevol = args.slave.split("::") + # Add First/Primary Subordinate host, user and volume + if getattr(args, "subordinate", None) is not None: + hostdata, subordinatevol = args.subordinate.split("::") hostdata = hostdata.split("@") - slavehost = hostdata[-1] - slaveuser = "root" + subordinatehost = hostdata[-1] + subordinateuser = "root" if len(hostdata) == 2: - slaveuser = hostdata[0] - extra_tmpl_args["primary_slave_host"] = slavehost - extra_tmpl_args["slaveuser"] = slaveuser - extra_tmpl_args["slavevol"] = slavevol + subordinateuser = hostdata[0] + extra_tmpl_args["primary_subordinate_host"] = subordinatehost + extra_tmpl_args["subordinateuser"] = subordinateuser + extra_tmpl_args["subordinatevol"] = subordinatevol # Add Bricks encoded path if getattr(args, "local_path", None) is not None: extra_tmpl_args["local_id"] = escape(args.local_path) - # Add Master Bricks encoded path(For Slave) - if getattr(args, "master_brick", None) is not None: - extra_tmpl_args["master_brick_id"] = escape(args.master_brick) + # Add Main Bricks encoded path(For Subordinate) + if getattr(args, "main_brick", None) is not None: + extra_tmpl_args["main_brick_id"] = escape(args.main_brick) # Load configurations config_file = getattr(args, "config_file", None) @@ -227,13 +227,13 @@ def main(): # Subcmd accepts config file argument but not passed # Set default path for config file in that case # If an subcmd accepts config file then it also accepts - # master and Slave arguments. + # main and Subordinate arguments. 
     if config_file is None and hasattr(args, "config_file"):
         config_file = "%s/geo-replication/%s_%s_%s/gsyncd.conf" % (
             GLUSTERD_WORKDIR,
-            args.master,
-            extra_tmpl_args["primary_slave_host"],
-            extra_tmpl_args["slavevol"])
+            args.main,
+            extra_tmpl_args["primary_subordinate_host"],
+            extra_tmpl_args["subordinatevol"])
 
     # If Config file path not exists, log error and continue using default conf
     config_file_error_msg = None
@@ -247,9 +247,9 @@ def main():
 
     rconf.config_file = config_file
 
-    # Override gconf values from argument values only if it is slave gsyncd
+    # Override gconf values from argument values only if it is subordinate gsyncd
     override_from_args = False
-    if args.subcmd == "slave":
+    if args.subcmd == "subordinate":
         override_from_args = True
 
     # Load Config file
@@ -264,23 +264,23 @@ def main():
     if args.subcmd in ("worker", "agent"):
         # If Worker or agent, then add brick path also to label
         label = "%s %s" % (args.subcmd, args.local_path)
-    elif args.subcmd == "slave":
-        # If Slave add Master node and Brick details
-        label = "%s %s%s" % (args.subcmd, args.master_node, args.master_brick)
+    elif args.subcmd == "subordinate":
+        # If Subordinate add Main node and Brick details
+        label = "%s %s%s" % (args.subcmd, args.main_node, args.main_brick)
 
     # Setup Logger
     # Default log file
     log_file = gconf.get("cli-log-file")
     log_level = gconf.get("cli-log-level")
 
-    if getattr(args, "master", None) is not None and \
-       getattr(args, "slave", None) is not None:
+    if getattr(args, "main", None) is not None and \
+       getattr(args, "subordinate", None) is not None:
         log_file = gconf.get("log-file")
         log_level = gconf.get("log-level")
 
-    # Use different log file location for Slave log file
-    if args.subcmd == "slave":
-        log_file = gconf.get("slave-log-file")
-        log_level = gconf.get("slave-log-level")
+    # Use different log file location for Subordinate log file
+    if args.subcmd == "subordinate":
+        log_file = gconf.get("subordinate-log-file")
+        log_level = gconf.get("subordinate-log-level")
 
     if args.debug:
         log_file = "-"
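
When no -c/--config-file is passed, main() derives the per-session config path from the main volume name, the primary subordinate host and the subordinate volume, exactly as the hunk above shows. The same template, reduced to a runnable sketch (the GLUSTERD_WORKDIR value and the sample session are assumptions for illustration):

    # Sketch of the default session config-path derivation.
    GLUSTERD_WORKDIR = "/var/lib/glusterd"

    def session_config_path(mainvol, primary_subordinate_host, subordinatevol):
        # <workdir>/geo-replication/<main>_<subhost>_<subvol>/gsyncd.conf
        return "%s/geo-replication/%s_%s_%s/gsyncd.conf" % (
            GLUSTERD_WORKDIR, mainvol, primary_subordinate_host, subordinatevol)

    print(session_config_path("gv1", "f241", "gv2"))
    # -> /var/lib/glusterd/geo-replication/gv1_f241_gv2/gsyncd.conf
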
diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py
index 86b942d..6cf41df 100644
--- a/geo-replication/syncdaemon/gsyncdconfig.py
+++ b/geo-replication/syncdaemon/gsyncdconfig.py
@@ -218,12 +218,12 @@ def _load(self):
                     self.default_values[k] = self.gconf.get(k, "")
                 self.gconf[k] = v.strip()
 
-        # Overwrite the Slave configurations which are sent as
-        # arguments to gsyncd slave
+        # Overwrite the Subordinate configurations which are sent as
+        # arguments to gsyncd subordinate
         if self.override_from_args:
             for k, v in self.args.items():
                 k = k.replace("_", "-")
-                if k.startswith("slave-") and k in self.gconf:
+                if k.startswith("subordinate-") and k in self.gconf:
                     self.gconf[k] = v
 
         self._tmpl_substitute()
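
The override rule above is deliberately narrow: only "subordinate-"-prefixed keys that already exist in the loaded config may be overridden from arguments, and only when override_from_args is set (i.e., subordinate-mode gsyncd). A standalone sketch of that rule with illustrative data:

    # Sketch of the argument-override rule in _load() above.
    def apply_arg_overrides(gconf, args, override_from_args):
        if not override_from_args:
            return gconf
        for k, v in args.items():
            k = k.replace("_", "-")   # argparse dests use underscores
            if k.startswith("subordinate-") and k in gconf:
                gconf[k] = v
        return gconf

    conf = {"subordinate-log-level": "INFO", "log-level": "INFO"}
    print(apply_arg_overrides(conf, {"subordinate_log_level": "DEBUG",
                                     "log_level": "DEBUG"}, True))
    # only subordinate-log-level is overridden; log-level stays INFO
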
diff --git a/geo-replication/syncdaemon/gsyncdstatus.py b/geo-replication/syncdaemon/gsyncdstatus.py
index e8a810f..b847bed 100644
--- a/geo-replication/syncdaemon/gsyncdstatus.py
+++ b/geo-replication/syncdaemon/gsyncdstatus.py
@@ -57,7 +57,7 @@ def human_time_utc(ts):
 
 def get_default_values():
     return {
-        "slave_node": DEFAULT_STATUS,
+        "subordinate_node": DEFAULT_STATUS,
         "worker_status": DEFAULT_STATUS,
         "last_synced": 0,
         "last_synced_entry": 0,
@@ -123,12 +123,12 @@ def set_monitor_status(status_file, status):
 
 
 class GeorepStatus(object):
-    def __init__(self, monitor_status_file, master_node, brick, master_node_id,
-                 master, slave, monitor_pid_file=None):
-        self.master = master
-        slv_data = slave.split("::")
-        self.slave_host = slv_data[0]
-        self.slave_volume = slv_data[1].split(":")[0]  # Remove Slave UUID
+    def __init__(self, monitor_status_file, main_node, brick, main_node_id,
+                 main, subordinate, monitor_pid_file=None):
+        self.main = main
+        slv_data = subordinate.split("::")
+        self.subordinate_host = slv_data[0]
+        self.subordinate_volume = slv_data[1].split(":")[0]  # Remove Subordinate UUID
         self.work_dir = os.path.dirname(monitor_status_file)
         self.monitor_status_file = monitor_status_file
         self.filename = os.path.join(self.work_dir,
@@ -139,19 +139,19 @@ def __init__(self, monitor_status_file, master_node, brick, master_node_id,
             os.close(fd)
             fd = os.open(self.monitor_status_file, os.O_CREAT | os.O_RDWR)
             os.close(fd)
-        self.master_node = master_node
-        self.master_node_id = master_node_id
+        self.main_node = main_node
+        self.main_node_id = main_node_id
         self.brick = brick
         self.default_values = get_default_values()
         self.monitor_pid_file = monitor_pid_file
 
     def send_event(self, event_type, **kwargs):
         gf_event(event_type,
-                 master_volume=self.master,
-                 master_node=self.master_node,
-                 master_node_id=self.master_node_id,
-                 slave_host=self.slave_host,
-                 slave_volume=self.slave_volume,
+                 main_volume=self.main,
+                 main_node=self.main_node,
+                 main_node_id=self.main_node_id,
+                 subordinate_host=self.subordinate_host,
+                 subordinate_volume=self.subordinate_volume,
                  brick_path=self.brick,
                  **kwargs)
 
@@ -184,7 +184,7 @@ def _update(self, mergerfunc):
 
     def reset_on_worker_start(self):
         def merger(data):
-            data["slave_node"] = DEFAULT_STATUS
+            data["subordinate_node"] = DEFAULT_STATUS
             data["crawl_status"] = DEFAULT_STATUS
             data["entry"] = 0
             data["data"] = 0
@@ -251,9 +251,9 @@ def set_worker_crawl_status(self, status):
         logging.info(lf("Crawl Status Change",
                         status=status))
 
-    def set_slave_node(self, slave_node):
+    def set_subordinate_node(self, subordinate_node):
         def merger(data):
-            data["slave_node"] = slave_node
+            data["subordinate_node"] = subordinate_node
             return json.dumps(data)
 
         self._update(merger)
@@ -296,7 +296,7 @@ def get_status(self, checkpoint_time=0):
         """
         Monitor Status --->   Created  Started  Paused  Stopped
         ----------------------------------------------------------------------
-        slave_node            N/A      VALUE    VALUE   N/A
+        subordinate_node      N/A      VALUE    VALUE   N/A
         status                Created  VALUE    Paused  Stopped
         last_synced           N/A      VALUE    VALUE   VALUE
         last_synced_entry     N/A      VALUE    VALUE   VALUE
@@ -391,7 +391,7 @@ def get_status(self, checkpoint_time=0):
             data["checkpoint_completion_time_utc"] = DEFAULT_STATUS
 
         if data["worker_status"] not in ["Active", "Passive"]:
-            data["slave_node"] = DEFAULT_STATUS
+            data["subordinate_node"] = DEFAULT_STATUS
 
         if data.get("last_synced_utc", 0) == 0:
             data["last_synced_utc"] = DEFAULT_STATUS
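
GeorepStatus above funnels every state change through _update() with a small merger callback that rewrites the JSON status blob. The shape of that pattern, reduced to a plain dict; the real _update() reads and rewrites the status file under a lock, which is omitted here:

    # Sketch of the merger-callback update used by GeorepStatus.
    import json

    def update(state, mergerfunc):
        data = json.loads(state["blob"])      # read current status
        state["blob"] = mergerfunc(data)      # merger returns the new JSON
        return state

    state = {"blob": json.dumps({"subordinate_node": "N/A", "entry": 5})}

    def merger(data):
        data["subordinate_node"] = "node2.example.com"
        return json.dumps(data)

    print(update(state, merger))
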
role""" KFGN = 0 KNAT = 1 @@ -349,15 +349,15 @@ class GMasterCommon(object): def get_sys_volinfo(self): """query volume marks on fs root - err out on multiple foreign masters + err out on multiple foreign mains """ fgn_vis, nat_vi = ( - self.master.server.aggregated.foreign_volume_infos(), - self.master.server.aggregated.native_volume_info()) + self.main.server.aggregated.foreign_volume_infos(), + self.main.server.aggregated.native_volume_info()) fgn_vi = None if fgn_vis: if len(fgn_vis) > 1: - raise GsyncdError("cannot work with multiple foreign masters") + raise GsyncdError("cannot work with multiple foreign mains") fgn_vi = fgn_vis[0] return fgn_vi, nat_vi @@ -372,13 +372,13 @@ def volmark(self): return self.volinfo['volume_mark'] def get_entry_stime(self): - data = self.slave.server.entry_stime(".", self.uuid) + data = self.subordinate.server.entry_stime(".", self.uuid) if isinstance(data, int): data = None return data def get_data_stime(self): - data = self.slave.server.stime(".", self.uuid) + data = self.subordinate.server.stime(".", self.uuid) if isinstance(data, int): data = None return data @@ -389,25 +389,25 @@ def xtime(self, path, *a, **opts): as of amending, we can create missing xtime, or determine a valid value if what we get is expired (as of the volume mark expiry); way of amendig - depends on @opts and on subject of query (master - or slave). + depends on @opts and on subject of query (main + or subordinate). """ if a: rsc = a[0] else: - rsc = self.master - self.make_xtime_opts(rsc == self.master, opts) + rsc = self.main + self.make_xtime_opts(rsc == self.main, opts) return self.xtime_low(rsc, path, **opts) - def __init__(self, master, slave): - self.master = master - self.slave = slave + def __init__(self, main, subordinate): + self.main = main + self.subordinate = subordinate self.jobtab = {} if gconf.get("sync-method") == "tarssh": - self.syncer = Syncer(slave, self.slave.tarssh, [2]) + self.syncer = Syncer(subordinate, self.subordinate.tarssh, [2]) else: # partial transfer (cf. rsync(1)), that's normal - self.syncer = Syncer(slave, self.slave.rsync, [23, 24]) + self.syncer = Syncer(subordinate, self.subordinate.rsync, [23, 24]) # crawls vs. turns: # - self.crawls is simply the number of crawl() invocations on root # - one turn is a maximal consecutive sequence of crawls so that each @@ -415,7 +415,7 @@ def __init__(self, master, slave): # - self.turns is the number of turns since start # - self.total_turns is a limit so that if self.turns reaches it, then # we exit (for diagnostic purposes) - # so, eg., if the master fs changes unceasingly, self.turns will remain + # so, eg., if the main fs changes unceasingly, self.turns will remain # 0. 
         self.crawls = 0
         self.turns = 0
@@ -432,12 +432,12 @@ def __init__(self, master, slave):
     def init_keep_alive(cls):
         """start the keep-alive thread """
-        timo = gconf.get("slave-timeout", 0)
+        timo = gconf.get("subordinate-timeout", 0)
         if timo > 0:
             def keep_alive():
                 while True:
                     vi, gap = cls.keepalive_payload_hook(timo, timo * 0.5)
-                    cls.slave.server.keep_alive(vi)
+                    cls.subordinate.server.keep_alive(vi)
                     time.sleep(gap)
             t = Thread(target=keep_alive)
             t.start()
@@ -464,7 +464,7 @@ def mgmt_lock(self):
                 raise
         fd = None
-        bname = str(self.uuid) + "_" + rconf.args.slave_id + "_subvol_" \
+        bname = str(self.uuid) + "_" + rconf.args.subordinate_id + "_subvol_" \
             + str(rconf.args.subvol_num) + ".lock"
         mgmt_lock_dir = os.path.join(gconf.get("meta-volume-mnt"), "geo-rep")
         path = os.path.join(mgmt_lock_dir, bname)
@@ -510,7 +510,7 @@ def mgmt_lock(self):
 
     def should_crawl(self):
         if not gconf.get("use-meta-volume"):
-            return rconf.args.local_node_id in self.master.server.node_uuid()
+            return rconf.args.local_node_id in self.main.server.node_uuid()
 
         if not os.path.ismount(gconf.get("meta-volume-mnt")):
             logging.error("Meta-volume is not mounted. Worker Exiting...")
@@ -540,23 +540,23 @@ def crawlwrap(self, oneshot=False, register_time=None):
         # no need to maintain volinfo state machine.
         # in a cascading setup, each geo-replication session is
         # independent (ie. 'volume-mark' and 'xtime' are not
-        # propagated). This is because the slave's xtime is now
-        # stored on the master itself. 'volume-mark' just identifies
+        # propagated). This is because the subordinate's xtime is now
+        # stored on the main itself. 'volume-mark' just identifies
         # that we are in a cascading setup and need to enable
         # 'geo-replication.ignore-pid-check' option.
         volinfo_sys = self.volinfo_hook()
         self.volinfo = volinfo_sys[self.KNAT]
-        inter_master = volinfo_sys[self.KFGN]
-        logging.debug("%s master with volume id %s ..." %
-                      (inter_master and "intermediate" or "primary",
+        inter_main = volinfo_sys[self.KFGN]
+        logging.debug("%s main with volume id %s ..." %
+                      (inter_main and "intermediate" or "primary",
                        self.uuid))
         rconf.volume_id = self.uuid
         if self.volinfo:
             if self.volinfo['retval']:
-                logging.warn(lf("master cluster's info may not be valid",
+                logging.warn(lf("main cluster's info may not be valid",
                                 error=self.volinfo['retval']))
         else:
-            raise GsyncdError("master volinfo unavailable")
+            raise GsyncdError("main volinfo unavailable")
         self.lastreport['time'] = time.time()
 
         t0 = time.time()
@@ -583,16 +583,16 @@ def crawlwrap(self, oneshot=False, register_time=None):
                     self.status.set_passive()
                     # bring up _this_ brick to the cluster stime
                     # which is min of cluster (but max of the replicas)
-                    brick_stime = self.xtime('.', self.slave)
-                    cluster_stime = self.master.server.aggregated.stime_mnt(
-                        '.', '.'.join([str(self.uuid), rconf.args.slave_id]))
+                    brick_stime = self.xtime('.', self.subordinate)
+                    cluster_stime = self.main.server.aggregated.stime_mnt(
+                        '.', '.'.join([str(self.uuid), rconf.args.subordinate_id]))
                     logging.debug(lf("Crawl info",
                                      cluster_stime=cluster_stime,
                                      brick_stime=brick_stime))
 
                     if not isinstance(cluster_stime, int):
                         if brick_stime < cluster_stime:
-                            self.slave.server.set_stime(
+                            self.subordinate.server.set_stime(
                                 self.FLAT_DIR_HIERARCHY, self.uuid,
                                 cluster_stime)
                             self.upd_stime(cluster_stime)
                             # Purge all changelogs available in processing dir
@@ -676,13 +676,13 @@ def wait(self, path, *args):
         return succeed
 
     def sendmark(self, path, mark, adct=None):
-        """update slave side xtime for @path to master side xtime
+        """update subordinate side xtime for @path to main side xtime
 
         also can send a setattr payload (see Server.setattr).
         """
         if adct:
-            self.slave.server.setattr(path, adct)
-        self.set_slave_xtime(path, mark)
+            self.subordinate.server.setattr(path, adct)
+        self.set_subordinate_xtime(path, mark)
 
 
 class XCrawlMetadata(object):
@@ -694,7 +694,7 @@ def __init__(self, st_uid, st_gid, st_mode, st_atime, st_mtime):
         self.st_mtime = float(st_mtime)
 
 
-class GMasterChangelogMixin(GMasterCommon):
+class GMainChangelogMixin(GMainCommon):
 
     """ changelog based change detection and syncing """
@@ -810,7 +810,7 @@ def fix_possible_entry_failures(self, failures, retry_count, entries):
         failures1 = []
         for failure in failures:
             if failure[2]['name_mismatch']:
-                pbname = failure[2]['slave_entry']
+                pbname = failure[2]['subordinate_entry']
             elif failure[2]['dst']:
                 pbname = failure[0]['entry1']
             else:
@@ -819,66 +819,66 @@ def fix_possible_entry_failures(self, failures, retry_count, entries):
             op = failure[0]['op']
             # name exists but gfid is different
             if failure[2]['gfid_mismatch'] or failure[2]['name_mismatch']:
-                slave_gfid = failure[2]['slave_gfid']
-                st = lstat(os.path.join(pfx, slave_gfid))
+                subordinate_gfid = failure[2]['subordinate_gfid']
+                st = lstat(os.path.join(pfx, subordinate_gfid))
                 # Takes care of scenarios with no hardlinks
                 if isinstance(st, int) and st == ENOENT:
-                    logging.info(lf('Entry not present on master. Fixing gfid '
-                                    'mismatch in slave. Deleting the entry',
+                    logging.info(lf('Entry not present on main. Fixing gfid '
+                                    'mismatch in subordinate. Deleting the entry',
                                     retry_count=retry_count,
                                     entry=repr(failure)))
                     # Add deletion to fix_entry_ops list
-                    if failure[2]['slave_isdir']:
+                    if failure[2]['subordinate_isdir']:
                         fix_entry_ops.append(
                             edct('RMDIR',
-                                 gfid=failure[2]['slave_gfid'],
+                                 gfid=failure[2]['subordinate_gfid'],
                                  entry=pbname))
                     else:
                         fix_entry_ops.append(
                             edct('UNLINK',
-                                 gfid=failure[2]['slave_gfid'],
+                                 gfid=failure[2]['subordinate_gfid'],
                                  entry=pbname))
-                # Takes care of scenarios of hardlinks/renames on master
+                # Takes care of scenarios of hardlinks/renames on main
                 elif not isinstance(st, int):
-                    if matching_disk_gfid(slave_gfid, pbname):
-                        # Safe to ignore the failure as master contains same
+                    if matching_disk_gfid(subordinate_gfid, pbname):
+                        # Safe to ignore the failure as main contains same
                         # file with same gfid. Remove entry from entries list
-                        logging.info(lf('Fixing gfid mismatch in slave. '
+                        logging.info(lf('Fixing gfid mismatch in subordinate. '
                                         ' Safe to ignore, take out entry',
                                         retry_count=retry_count,
                                         entry=repr(failure)))
                         entries.remove(failure[0])
-                    # The file exists on master but with different name.
+                    # The file exists on main but with different name.
                     # Probably renamed and got missed during xsync crawl.
-                    elif failure[2]['slave_isdir']:
+                    elif failure[2]['subordinate_isdir']:
                         realpath = os.readlink(os.path.join(gconf.local_path,
                                                             ".glusterfs",
-                                                            slave_gfid[0:2],
-                                                            slave_gfid[2:4],
-                                                            slave_gfid))
+                                                            subordinate_gfid[0:2],
+                                                            subordinate_gfid[2:4],
+                                                            subordinate_gfid))
                         dst_entry = os.path.join(pfx, realpath.split('/')[-2],
                                                  realpath.split('/')[-1])
                         src_entry = pbname
                         logging.info(lf('Fixing dir name/gfid mismatch in '
-                                        'slave', retry_count=retry_count,
+                                        'subordinate', retry_count=retry_count,
                                         entry=repr(failure)))
                         if src_entry == dst_entry:
-                            # Safe to ignore the failure as master contains
-                            # same directory as in slave with same gfid.
+                            # Safe to ignore the failure as main contains
+                            # same directory as in subordinate with same gfid.
                             # Remove the failure entry from entries list
                             logging.info(lf('Fixing dir name/gfid mismatch'
-                                            ' in slave. Safe to ignore, '
+                                            ' in subordinate. Safe to ignore, '
                                             'take out entry',
                                             retry_count=retry_count,
                                             entry=repr(failure)))
                             entries.remove(failure[0])
                         else:
-                            rename_dict = edct('RENAME', gfid=slave_gfid,
+                            rename_dict = edct('RENAME', gfid=subordinate_gfid,
                                                entry=src_entry,
                                                entry1=dst_entry, stat=st,
                                                link=None)
                             logging.info(lf('Fixing dir name/gfid mismatch'
-                                            ' in slave. Renaming',
+                                            ' in subordinate. Renaming',
                                             retry_count=retry_count,
                                             entry=repr(rename_dict)))
                             fix_entry_ops.append(rename_dict)
@@ -886,14 +886,14 @@ def fix_possible_entry_failures(self, failures, retry_count, entries):
                         # A hardlink file exists with different name or
                         # renamed file exists and we are sure from
                         # matching_disk_gfid check that the entry doesn't
-                        # exist with same gfid so we can safely delete on slave
-                        logging.info(lf('Fixing file gfid mismatch in slave. '
+                        # exist with same gfid so we can safely delete on subordinate
+                        logging.info(lf('Fixing file gfid mismatch in subordinate. '
                                         'Hardlink/Rename Case. Deleting entry',
                                         retry_count=retry_count,
                                         entry=repr(failure)))
                         fix_entry_ops.append(
                             edct('UNLINK',
-                                 gfid=failure[2]['slave_gfid'],
+                                 gfid=failure[2]['subordinate_gfid'],
                                  entry=pbname))
             elif failure[1] == ENOENT:
                 # Ignore ENOENT error for fix_entry_ops aka retry_count > 1
@@ -906,11 +906,11 @@ def fix_possible_entry_failures(self, failures, retry_count, entries):
                 elif op in ('MKNOD', 'CREATE', 'MKDIR'):
                     pargfid = pbname.split('/')[1]
                     st = lstat(os.path.join(pfx, pargfid))
-                    # Safe to ignore the failure as master doesn't contain
+                    # Safe to ignore the failure as main doesn't contain
                     # parent directory.
                     if isinstance(st, int):
-                        logging.info(lf('Fixing ENOENT error in slave. Parent '
-                                        'does not exist on master. Safe to '
+                        logging.info(lf('Fixing ENOENT error in subordinate. Parent '
+                                        'does not exist on main. Safe to '
                                         'ignore, take out entry',
                                         retry_count=retry_count,
                                         entry=repr(failure)))
@@ -918,7 +918,7 @@ def fix_possible_entry_failures(self, failures, retry_count, entries):
 
         if fix_entry_ops:
             # Process deletions of entries whose gfids are mismatched
-            failures1 = self.slave.server.entry_ops(fix_entry_ops)
+            failures1 = self.subordinate.server.entry_ops(fix_entry_ops)
             if not failures1:
                 logging.info("Successfully fixed entry ops with gfid mismatch")
@@ -1043,7 +1043,7 @@ def process_change(self, change, done, retry):
                     if ty in ['RMDIR'] and not isinstance(st, int):
                         logging.info(lf('Ignoring rmdir. Directory present in '
-                                        'master', gfid=gfid, pgfid_bname=en))
+                                        'main', gfid=gfid, pgfid_bname=en))
                         continue
 
                     if not gconf.get("ignore-deletes"):
@@ -1132,7 +1132,7 @@ def process_change(self, change, done, retry):
             elif et == self.TYPE_GFID:
                 # If self.unlinked_gfids is available, then that means it is
                 # retrying the changelog second time. Do not add the GFID's
-                # to rsync job if failed previously but unlinked in master
+                # to rsync job if failed previously but unlinked in main
                 if self.unlinked_gfids and \
                    os.path.join(pfx, ec[0]) in self.unlinked_gfids:
                     logging.debug("ignoring data, since file purged interim")
@@ -1178,13 +1178,13 @@ def process_change(self, change, done, retry):
 
         # Increment counters for Status
         self.status.inc_value("entry", len(entries))
 
-        failures = self.slave.server.entry_ops(entries)
+        failures = self.subordinate.server.entry_ops(entries)
         count = 0
         while failures and count < self.MAX_OE_RETRIES:
             count += 1
             self.handle_entry_failures(failures, entries)
             logging.info("Retry original entries. count = %s" % count)
-            failures = self.slave.server.entry_ops(entries)
+            failures = self.subordinate.server.entry_ops(entries)
             if not failures:
                 logging.info("Sucessfully fixed all entry ops with gfid "
                              "mismatch")
@@ -1226,7 +1226,7 @@ def process_change(self, change, done, retry):
                     meta_entries.append(edct('META', go=go[0], stat=st))
             if meta_entries:
                 self.status.inc_value("meta", len(entries))
-                failures = self.slave.server.meta_ops(meta_entries)
+                failures = self.subordinate.server.meta_ops(meta_entries)
                 self.log_failures(failures, 'go', '', 'META')
                 self.status.dec_value("meta", len(entries))
 
@@ -1296,7 +1296,7 @@ def process(self, changes, done=1):
         # and prevents a spiraling increase of wait stubs from consuming
         # unbounded memory and resources.
 
-        # update the slave's time with the timestamp of the _last_
+        # update the subordinate's time with the timestamp of the _last_
         # changelog file time suffix. Since, the changelog prefix time
Since, the changelog prefix time # is the time when the changelog was rolled over, introduce a # tolerance of 1 second to counter the small delta b/w the @@ -1396,7 +1396,7 @@ def process(self, changes, done=1): entry_stime=self.get_entry_stime())) def upd_entry_stime(self, stime): - self.slave.server.set_entry_stime(self.FLAT_DIR_HIERARCHY, + self.subordinate.server.set_entry_stime(self.FLAT_DIR_HIERARCHY, self.uuid, stime) @@ -1421,7 +1421,7 @@ def update_worker_remote_node(self): node_data = node.split("@") node = node_data[-1] remote_node_ip = node.split(":")[0] - self.status.set_slave_node(remote_node_ip) + self.status.set_subordinate_node(remote_node_ip) def changelogs_batch_process(self, changes): changelogs_batches = [] @@ -1458,7 +1458,7 @@ def crawl(self): changes = self.changelog_agent.getchanges() if changes: if data_stime: - logging.info(lf("slave's time", + logging.info(lf("subordinate's time", stime=data_stime)) processed = [x for x in changes if int(x.split('.')[-1]) < data_stime[0]] @@ -1483,7 +1483,7 @@ def register(self, register_time, changelog_agent, status): self.status = status -class GMasterChangeloghistoryMixin(GMasterChangelogMixin): +class GMainChangeloghistoryMixin(GMainChangelogMixin): def register(self, register_time, changelog_agent, status): self.changelog_agent = changelog_agent self.changelog_register_time = register_time @@ -1534,7 +1534,7 @@ def crawl(self): changes = self.changelog_agent.history_getchanges() if changes: if data_stime: - logging.info(lf("slave's time", + logging.info(lf("subordinate's time", stime=data_stime)) processed = [x for x in changes if int(x.split('.')[-1]) < data_stime[0]] @@ -1570,7 +1570,7 @@ def crawl(self): raise PartialHistoryAvailable(str(actual_end)) -class GMasterXsyncMixin(GMasterChangelogMixin): +class GMainXsyncMixin(GMainChangelogMixin): """ This crawl needs to be xtime based (as of now @@ -1637,7 +1637,7 @@ def Xsyncer(): self.process([item[1]], 0) self.archive_and_purge_changelogs([item[1]]) elif item[0] == 'stime': - logging.debug(lf('setting slave time', + logging.debug(lf('setting subordinate time', time=item[1])) self.upd_stime(item[1][1], item[1][0]) else: @@ -1703,34 +1703,34 @@ def is_sticky(self, path, mo): """check for DHTs linkto sticky bit file""" sticky = False if mo & 01000: - sticky = self.master.server.linkto_check(path) + sticky = self.main.server.linkto_check(path) return sticky def Xcrawl(self, path='.', xtr_root=None): """ generate a CHANGELOG file consumable by process_change. - slave's xtime (stime) is _cached_ for comparisons across + subordinate's xtime (stime) is _cached_ for comparisons across the filesystem tree, but set after directory synchronization. 
""" if path == '.': self.crawls += 1 if not xtr_root: # get the root stime and use it for all comparisons - xtr_root = self.xtime('.', self.slave) + xtr_root = self.xtime('.', self.subordinate) if isinstance(xtr_root, int): if xtr_root != ENOENT: - logging.warn(lf("slave cluster not returning the " + logging.warn(lf("subordinate cluster not returning the " "xtime for root", error=xtr_root)) xtr_root = self.minus_infinity xtl = self.xtime(path) if isinstance(xtl, int): - logging.warn("master cluster's xtime not found") - xtr = self.xtime(path, self.slave) + logging.warn("main cluster's xtime not found") + xtr = self.xtime(path, self.subordinate) if isinstance(xtr, int): if xtr != ENOENT: - logging.warn(lf("slave cluster not returning the " + logging.warn(lf("subordinate cluster not returning the " "xtime for dir", path=path, error=xtr)) @@ -1745,8 +1745,8 @@ def Xcrawl(self, path='.', xtr_root=None): return self.xtime_reversion_hook(path, xtl, xtr) logging.debug("entering " + path) - dem = self.master.server.entries(path) - pargfid = self.master.server.gfid(path) + dem = self.main.server.entries(path) + pargfid = self.main.server.gfid(path) if isinstance(pargfid, int): logging.warn(lf('skipping directory', path=path)) @@ -1761,7 +1761,7 @@ def Xcrawl(self, path='.', xtr_root=None): continue if not self.need_sync(e, xte, xtr): continue - st = self.master.server.lstat(e) + st = self.main.server.lstat(e) if isinstance(st, int): logging.warn(lf('got purged in the interim', path=e)) @@ -1770,7 +1770,7 @@ def Xcrawl(self, path='.', xtr_root=None): logging.debug(lf('ignoring sticky bit file', path=e)) continue - gfid = self.master.server.gfid(e) + gfid = self.main.server.gfid(e) if isinstance(gfid, int): logging.warn(lf('skipping entry', path=e)) @@ -1811,7 +1811,7 @@ def Xcrawl(self, path='.', xtr_root=None): nlink = st.st_nlink nlink -= 1 # fixup backend stat link count # if a file has a hardlink, create a Changelog entry as - # 'LINK' so the slave side will decide if to create the + # 'LINK' so the subordinate side will decide if to create the # new entry, or to create link. if nlink == 1: self.write_entry_change("E", @@ -1912,10 +1912,10 @@ class Syncer(object): each completed syncjob. 
""" - def __init__(self, slave, sync_engine, resilient_errnos=[]): + def __init__(self, subordinate, sync_engine, resilient_errnos=[]): """spawn worker threads""" self.log_err = False - self.slave = slave + self.subordinate = subordinate self.lock = Lock() self.pb = PostBox() self.sync_engine = sync_engine diff --git a/geo-replication/syncdaemon/monitor.py b/geo-replication/syncdaemon/monitor.py index 52ae256..1de5d64 100644 --- a/geo-replication/syncdaemon/monitor.py +++ b/geo-replication/syncdaemon/monitor.py @@ -77,8 +77,8 @@ def terminate(): # give a chance to graceful exit errno_wrap(os.kill, [-os.getpid(), signal.SIGTERM], [ESRCH]) - def monitor(self, w, argv, cpids, agents, slave_vol, slave_host, master, - suuid, slavenodes): + def monitor(self, w, argv, cpids, agents, subordinate_vol, subordinate_host, main, + suuid, subordinatenodes): """the monitor loop Basic logic is a blantantly simple blunt heuristics: @@ -102,9 +102,9 @@ def monitor(self, w, argv, cpids, agents, slave_vol, slave_host, master, w[0]['host'], w[0]['dir'], w[0]['uuid'], - master, - "%s::%s" % (slave_host, - slave_vol)) + main, + "%s::%s" % (subordinate_host, + subordinate_vol)) set_monitor_status(gconf.get("state-file"), self.ST_STARTED) self.status[w[0]['dir']].set_worker_status(self.ST_INIT) @@ -138,15 +138,15 @@ def exit_status(s): while ret in (0, 1): remote_user, remote_host = w[1][0].split("@") remote_id = w[1][1] - # Check the status of the connected slave node - # If the connected slave node is down then try to connect to + # Check the status of the connected subordinate node + # If the connected subordinate node is down then try to connect to # different up node. - current_slave_host = remote_host - slave_up_hosts = get_up_nodes(slavenodes, gconf.get("ssh-port")) + current_subordinate_host = remote_host + subordinate_up_hosts = get_up_nodes(subordinatenodes, gconf.get("ssh-port")) - if (current_slave_host, remote_id) not in slave_up_hosts: - if len(slave_up_hosts) > 0: - remote_new = random.choice(slave_up_hosts) + if (current_subordinate_host, remote_id) not in subordinate_up_hosts: + if len(subordinate_up_hosts) > 0: + remote_new = random.choice(subordinate_up_hosts) remote_host = "%s@%s" % (remote_user, remote_new[0]) remote_id = remote_new[1] @@ -155,7 +155,7 @@ def exit_status(s): logging.info(lf('starting gsyncd worker', brick=w[0]['dir'], - slave_node=remote_host)) + subordinate_node=remote_host)) # Couple of pipe pairs for RPC communication b/w # worker and changelog agent. @@ -172,12 +172,12 @@ def exit_status(s): os.close(ww) args_to_agent = argv + [ 'agent', - rconf.args.master, - rconf.args.slave, + rconf.args.main, + rconf.args.subordinate, '--local-path', w[0]['dir'], '--local-node', w[0]['host'], '--local-node-id', w[0]['uuid'], - '--slave-id', suuid, + '--subordinate-id', suuid, '--rpc-fd', ','.join([str(ra), str(wa), str(rw), str(ww)]) ] @@ -198,13 +198,13 @@ def exit_status(s): args_to_worker = argv + [ 'worker', - rconf.args.master, - rconf.args.slave, + rconf.args.main, + rconf.args.subordinate, '--feedback-fd', str(pw), '--local-path', w[0]['dir'], '--local-node', w[0]['host'], '--local-node-id', w[0]['uuid'], - '--slave-id', suuid, + '--subordinate-id', suuid, '--rpc-fd', ','.join([str(rw), str(ww), str(ra), str(wa)]), '--subvol-num', str(w[2]), @@ -295,11 +295,11 @@ def exit_status(s): else: logging.info( lf("Worker not confirmed after wait, aborting it. " - "Gsyncd invocation on remote slave via SSH or " - "gluster master mount might have hung. 
Please " + "Gsyncd invocation on remote subordinate via SSH or " + "gluster main mount might have hung. Please " "check the above logs for exact issue and check " - "master or slave volume for errors. Restarting " - "master/slave volume accordingly might help.", + "main or subordinate volume for errors. Restarting " + "main/subordinate volume accordingly might help.", brick=w[0]['dir'], timeout=conn_timeout)) errno_wrap(os.kill, [cpid, signal.SIGKILL], [ESRCH]) @@ -317,18 +317,18 @@ def exit_status(s): if ret in (0, 1): self.status[w[0]['dir']].set_worker_status(self.ST_FAULTY) gf_event(EVENT_GEOREP_FAULTY, - master_volume=master.volume, - master_node=w[0]['host'], - master_node_id=w[0]['uuid'], - slave_host=slave_host, - slave_volume=slave_vol, - current_slave_host=current_slave_host, + main_volume=main.volume, + main_node=w[0]['host'], + main_node_id=w[0]['uuid'], + subordinate_host=subordinate_host, + subordinate_volume=subordinate_vol, + current_subordinate_host=current_subordinate_host, brick_path=w[0]['dir']) time.sleep(10) self.status[w[0]['dir']].set_worker_status(self.ST_INCON) return ret - def multiplex(self, wspx, suuid, slave_vol, slave_host, master, slavenodes): + def multiplex(self, wspx, suuid, subordinate_vol, subordinate_host, main, subordinatenodes): argv = [os.path.basename(sys.executable), sys.argv[0]] cpids = set() @@ -336,8 +336,8 @@ def multiplex(self, wspx, suuid, slave_vol, slave_host, master, slavenodes): ta = [] for wx in wspx: def wmon(w): - cpid, _ = self.monitor(w, argv, cpids, agents, slave_vol, - slave_host, master, suuid, slavenodes) + cpid, _ = self.monitor(w, argv, cpids, agents, subordinate_vol, + subordinate_host, main, suuid, subordinatenodes) time.sleep(1) self.lock.acquire() for cpid in cpids: @@ -353,32 +353,32 @@ def wmon(w): t.join() -def distribute(master, slave): +def distribute(main, subordinate): if rconf.args.use_gconf_volinfo: - mvol = VolinfoFromGconf(master.volume, master=True) + mvol = VolinfoFromGconf(main.volume, main=True) else: - mvol = Volinfo(master.volume, master.host) - logging.debug('master bricks: ' + repr(mvol.bricks)) + mvol = Volinfo(main.volume, main.host) + logging.debug('main bricks: ' + repr(mvol.bricks)) prelude = [] - slave_host = None - slave_vol = None + subordinate_host = None + subordinate_vol = None prelude = [gconf.get("ssh-command")] + \ gconf.get("ssh-options").split() + \ ["-p", str(gconf.get("ssh-port"))] + \ - [slave.remote_addr] + [subordinate.remote_addr] - logging.debug('slave SSH gateway: ' + slave.remote_addr) + logging.debug('subordinate SSH gateway: ' + subordinate.remote_addr) if rconf.args.use_gconf_volinfo: - svol = VolinfoFromGconf(slave.volume, master=False) + svol = VolinfoFromGconf(subordinate.volume, main=False) else: - svol = Volinfo(slave.volume, "localhost", prelude) + svol = Volinfo(subordinate.volume, "localhost", prelude) sbricks = svol.bricks suuid = svol.uuid - slave_host = slave.remote_addr.split('@')[-1] - slave_vol = slave.volume + subordinate_host = subordinate.remote_addr.split('@')[-1] + subordinate_vol = subordinate.volume # save this xattr for the session delete command old_stime_xattr_prefix = gconf.get("stime-xattr-prefix", None) @@ -388,22 +388,22 @@ def distribute(master, slave): old_stime_xattr_prefix != new_stime_xattr_prefix: gconf.setconfig("stime-xattr-prefix", new_stime_xattr_prefix) - logging.debug('slave bricks: ' + repr(sbricks)) + logging.debug('subordinate bricks: ' + repr(sbricks)) - slavenodes = set((b['host'], b["uuid"]) for b in sbricks) - rap = 
@@ -353,32 +353,32 @@ def wmon(w):
             t.join()


-def distribute(master, slave):
+def distribute(main, subordinate):
     if rconf.args.use_gconf_volinfo:
-        mvol = VolinfoFromGconf(master.volume, master=True)
+        mvol = VolinfoFromGconf(main.volume, main=True)
     else:
-        mvol = Volinfo(master.volume, master.host)
-    logging.debug('master bricks: ' + repr(mvol.bricks))
+        mvol = Volinfo(main.volume, main.host)
+    logging.debug('main bricks: ' + repr(mvol.bricks))
     prelude = []
-    slave_host = None
-    slave_vol = None
+    subordinate_host = None
+    subordinate_vol = None

     prelude = [gconf.get("ssh-command")] + \
         gconf.get("ssh-options").split() + \
         ["-p", str(gconf.get("ssh-port"))] + \
-        [slave.remote_addr]
+        [subordinate.remote_addr]

-    logging.debug('slave SSH gateway: ' + slave.remote_addr)
+    logging.debug('subordinate SSH gateway: ' + subordinate.remote_addr)

     if rconf.args.use_gconf_volinfo:
-        svol = VolinfoFromGconf(slave.volume, master=False)
+        svol = VolinfoFromGconf(subordinate.volume, main=False)
     else:
-        svol = Volinfo(slave.volume, "localhost", prelude)
+        svol = Volinfo(subordinate.volume, "localhost", prelude)

     sbricks = svol.bricks
     suuid = svol.uuid
-    slave_host = slave.remote_addr.split('@')[-1]
-    slave_vol = slave.volume
+    subordinate_host = subordinate.remote_addr.split('@')[-1]
+    subordinate_vol = subordinate.volume

     # save this xattr for the session delete command
     old_stime_xattr_prefix = gconf.get("stime-xattr-prefix", None)
@@ -388,22 +388,22 @@ def distribute(master, slave):
        old_stime_xattr_prefix != new_stime_xattr_prefix:
         gconf.setconfig("stime-xattr-prefix", new_stime_xattr_prefix)

-    logging.debug('slave bricks: ' + repr(sbricks))
+    logging.debug('subordinate bricks: ' + repr(sbricks))

-    slavenodes = set((b['host'], b["uuid"]) for b in sbricks)
-    rap = SSH.parse_ssh_address(slave)
-    slaves = [(rap['user'] + '@' + h[0], h[1]) for h in slavenodes]
+    subordinatenodes = set((b['host'], b["uuid"]) for b in sbricks)
+    rap = SSH.parse_ssh_address(subordinate)
+    subordinates = [(rap['user'] + '@' + h[0], h[1]) for h in subordinatenodes]

     workerspex = []
     for idx, brick in enumerate(mvol.bricks):
         if rconf.args.local_node_id == brick['uuid']:
             is_hot = mvol.is_hot(":".join([brick['host'], brick['dir']]))
             workerspex.append((brick,
-                               slaves[idx % len(slaves)],
+                               subordinates[idx % len(subordinates)],
                                get_subvol_num(idx, mvol, is_hot),
                                is_hot))
     logging.debug('worker specs: ' + repr(workerspex))
-    return workerspex, suuid, slave_vol, slave_host, master, slavenodes
+    return workerspex, suuid, subordinate_vol, subordinate_host, main, subordinatenodes


 def monitor(local, remote):
diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py
index df4006f..e440b14 100644
--- a/geo-replication/syncdaemon/resource.py
+++ b/geo-replication/syncdaemon/resource.py
@@ -28,7 +28,7 @@ import repce
 from repce import RepceServer, RepceClient
-from master import gmaster_builder
+from main import gmain_builder
 import syncdutils
 from syncdutils import GsyncdError, select, privileged, funcode
 from syncdutils import umask, entry2pb, gauxpfx, errno_wrap, lstat
@@ -360,7 +360,7 @@ def set_xtime_remote(cls, path, uuid, mark):

         the difference b/w this and set_xtime()
         being set_xtime() being overloaded to set the xtime
         on the brick (this method sets xtime on the
-        remote slave)
+        remote subordinate)
         """
         Xattr.lsetxattr(
             path, '.'.join([cls.GX_NSPACE, uuid, 'xtime']),
@@ -409,7 +409,7 @@ def entry_purge(op, entry, gfid, e):
         # The race here is between the GFID check and the purge.

         # If the entry or the gfid of the file to be deleted is not present
-        # on slave, we can ignore the unlink/rmdir
+        # on subordinate, we can ignore the unlink/rmdir
         if isinstance(lstat(entry), int) or \
            isinstance(lstat(os.path.join(pfx, gfid)), int):
             return
@@ -421,7 +421,7 @@ def entry_purge(op, entry, gfid, e):
         if op == 'UNLINK':
             er = errno_wrap(os.unlink, [entry], [ENOENT, ESTALE], [EBUSY])
             # EISDIR is safe error, ignore. This can only happen when
-            # unlink is sent from master while fixing gfid conflicts.
+            # unlink is sent from main while fixing gfid conflicts.
             if er != EISDIR:
                 return er
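distribute() above maps each local brick onto a subordinate endpoint round-robin with idx % len(subordinates). A compact sketch of the same assignment scheme, with placeholder brick and node names:

    def assign_subordinates(local_bricks, subordinates):
        """Round-robin each local brick onto a subordinate endpoint,
        the idx % len(...) scheme distribute() uses above."""
        return [(brick, subordinates[idx % len(subordinates)])
                for idx, brick in enumerate(local_bricks)]

    # Four bricks over two endpoints -> n1, n2, n1, n2 (hypothetical names)
    pairs = assign_subordinates(['/bricks/b1', '/bricks/b2',
                                 '/bricks/b3', '/bricks/b4'],
                                ['root@n1', 'root@n2'])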
@@ -436,11 +436,11 @@ def collect_failure(e, cmd_ret, dst=False):
     slv_entry_info['gfid_mismatch'] = False
     slv_entry_info['name_mismatch'] = False
     slv_entry_info['dst'] = dst
-    slv_entry_info['slave_isdir'] = False
-    slv_entry_info['slave_name'] = None
-    slv_entry_info['slave_gfid'] = None
-    # We do this for failing fops on Slave
-    # Master should be logging this
+    slv_entry_info['subordinate_isdir'] = False
+    slv_entry_info['subordinate_name'] = None
+    slv_entry_info['subordinate_gfid'] = None
+    # We do this for failing fops on Subordinate
+    # Main should be logging this
     if cmd_ret is None:
         return False
@@ -456,13 +456,13 @@ def collect_failure(e, cmd_ret, dst=False):
             st = lstat(en)
             if not isinstance(st, int):
                 if st and stat.S_ISDIR(st.st_mode):
-                    slv_entry_info['slave_isdir'] = True
+                    slv_entry_info['subordinate_isdir'] = True
                     dir_name = get_slv_dir_path(slv_host, slv_volume,
                                                 disk_gfid)
-                    slv_entry_info['slave_name'] = dir_name
+                    slv_entry_info['subordinate_name'] = dir_name
                 else:
-                    slv_entry_info['slave_isdir'] = False
-                    slv_entry_info['slave_gfid'] = disk_gfid
+                    slv_entry_info['subordinate_isdir'] = False
+                    slv_entry_info['subordinate_gfid'] = disk_gfid
             failures.append((e, cmd_ret, slv_entry_info))
     else:
         return False
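collect_failure() above initialises a per-entry failure record before inspecting the on-disk state. A sketch of that default record with the renamed keys, factored into a helper purely for illustration (no such helper exists in resource.py):

    def new_entry_failure_record(dst=False):
        """Default per-entry failure record, matching the fields
        collect_failure() initialises above (illustrative helper)."""
        return {
            'gfid_mismatch': False,
            'name_mismatch': False,
            'dst': dst,
            'subordinate_isdir': False,
            'subordinate_name': None,
            'subordinate_gfid': None,
        }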
logging.info(lf("Special case: rename on mkdir", gfid=gfid, entry=repr(entry))) src_entry = get_slv_dir_path(slv_host, slv_volume, gfid) @@ -602,9 +602,9 @@ def rename_with_disk_gfid_confirmation(gfid, entry, en): slv_entry_info['gfid_mismatch'] = False slv_entry_info['name_mismatch'] = True slv_entry_info['dst'] = False - slv_entry_info['slave_isdir'] = True - slv_entry_info['slave_gfid'] = gfid - slv_entry_info['slave_entry'] = src_entry + slv_entry_info['subordinate_isdir'] = True + slv_entry_info['subordinate_gfid'] = gfid + slv_entry_info['subordinate_entry'] = src_entry failures.append((e, EEXIST, slv_entry_info)) elif op == 'LINK': @@ -828,8 +828,8 @@ def __init__(self, params): @classmethod def get_glusterprog(cls): gluster_cmd_dir = gconf.get("gluster-command-dir") - if rconf.args.subcmd == "slave": - gluster_cmd_dir = gconf.get("slave-gluster-command-dir") + if rconf.args.subcmd == "subordinate": + gluster_cmd_dir = gconf.get("subordinate-gluster-command-dir") return os.path.join(gluster_cmd_dir, cls.glusterprog) def umount_l(self, d): @@ -920,24 +920,24 @@ def inhibit(self, label): mntpt = mntdata[:-1] assert(mntpt) - umount_master = False - umount_slave = False + umount_main = False + umount_subordinate = False if rconf.args.subcmd == "worker" \ and not unshare_propagation_supported() \ and not gconf.get("access-mount"): - umount_master = True - if rconf.args.subcmd == "slave" \ - and not gconf.get("slave-access-mount"): - umount_slave = True + umount_main = True + if rconf.args.subcmd == "subordinate" \ + and not gconf.get("subordinate-access-mount"): + umount_subordinate = True - if mounted and (umount_master or umount_slave): + if mounted and (umount_main or umount_subordinate): po = self.umount_l(mntpt) po.terminate_geterr(fail_on_err=False) if po.returncode != 0: po.errlog() rv = po.returncode logging.debug("Lazy umount done: %s" % mntpt) - if umount_master or umount_slave: + if umount_main or umount_subordinate: self.cleanup_mntpt(mntpt) except: logging.exception('mount cleanup failure:') @@ -1067,11 +1067,11 @@ class GLUSTER(object): """scheme class for gluster:// urls - can be used to represent a gluster slave server - on slave side, or interface to a remote gluster - slave on master side, or to represent master - (slave-ish features come from the mixins, master - functionality is outsourced to GMaster from master) + can be used to represent a gluster subordinate server + on subordinate side, or interface to a remote gluster + subordinate on main side, or to represent main + (subordinate-ish features come from the mixins, main + functionality is outsourced to GMain from main) """ server = GLUSTERServer @@ -1101,12 +1101,12 @@ def connect(self): mounter = label and MountbrokerMounter or DirectMounter log_file = gconf.get("gluster-log-file") - if rconf.args.subcmd == "slave": - log_file = gconf.get("slave-gluster-log-file") + if rconf.args.subcmd == "subordinate": + log_file = gconf.get("subordinate-gluster-log-file") log_level = gconf.get("gluster-log-level") - if rconf.args.subcmd == "slave": - log_level = gconf.get("slave-gluster-log-level") + if rconf.args.subcmd == "subordinate": + log_level = gconf.get("subordinate-gluster-log-level") params = gconf.get("gluster-params").split() + \ ['log-level=' + log_level] + \ @@ -1118,22 +1118,22 @@ def connect(self): logging.info(lf("Mounted gluster volume", duration="%.4f" % (time.time() - t0))) - def gmaster_instantiate_tuple(self, slave): + def gmain_instantiate_tuple(self, subordinate): """return a tuple of the 'one shot' 
@@ -1143,15 +1143,15 @@ def service_loop(self, slave=None):
             t = syncdutils.Thread(target=lambda: (repce.service_loop(),
                                                   syncdutils.finalize()))
             t.start()
-            logging.info("slave listening")
-            if gconf.get("slave-timeout") and gconf.get("slave-timeout") > 0:
+            logging.info("subordinate listening")
+            if gconf.get("subordinate-timeout") and gconf.get("subordinate-timeout") > 0:
                 while True:
                     lp = self.server.last_keep_alive
-                    time.sleep(gconf.get("slave-timeout"))
+                    time.sleep(gconf.get("subordinate-timeout"))
                     if lp == self.server.last_keep_alive:
                         logging.info(
                             lf("connection inactive, stopping",
-                               timeout=gconf.get("slave-timeout")))
+                               timeout=gconf.get("subordinate-timeout")))
                         break
             else:
                 select((), (), ())
@@ -1188,48 +1188,48 @@ def gfid(cls, e):
                 def linkto_check(cls, e):
                     return super(brickserver, cls).linkto_check(e)

-            # define {,set_}xtime in slave, thus preempting
+            # define {,set_}xtime in subordinate, thus preempting
             # the call to remote, so that it takes data from
             # the local brick
-            slave.server.xtime = types.MethodType(
+            subordinate.server.xtime = types.MethodType(
                 lambda _self, path, uuid: (
                     brickserver.xtime(path,
-                                      uuid + '.' + rconf.args.slave_id)
+                                      uuid + '.' + rconf.args.subordinate_id)
                 ),
-                slave.server)
-            slave.server.stime = types.MethodType(
+                subordinate.server)
+            subordinate.server.stime = types.MethodType(
                 lambda _self, path, uuid: (
                     brickserver.stime(path,
-                                      uuid + '.' + rconf.args.slave_id)
+                                      uuid + '.' + rconf.args.subordinate_id)
                 ),
-                slave.server)
-            slave.server.entry_stime = types.MethodType(
+                subordinate.server)
+            subordinate.server.entry_stime = types.MethodType(
                 lambda _self, path, uuid: (
                     brickserver.entry_stime(
                         path,
-                        uuid + '.' + rconf.args.slave_id)
+                        uuid + '.' + rconf.args.subordinate_id)
                 ),
-                slave.server)
-            slave.server.set_stime = types.MethodType(
+                subordinate.server)
+            subordinate.server.set_stime = types.MethodType(
                 lambda _self, path, uuid, mark: (
                     brickserver.set_stime(path,
-                                          uuid + '.' + rconf.args.slave_id,
+                                          uuid + '.' + rconf.args.subordinate_id,
                                           mark)
                 ),
-                slave.server)
-            slave.server.set_entry_stime = types.MethodType(
+                subordinate.server)
+            subordinate.server.set_entry_stime = types.MethodType(
                 lambda _self, path, uuid, mark: (
                     brickserver.set_entry_stime(
                         path,
-                        uuid + '.' + rconf.args.slave_id,
+                        uuid + '.' + rconf.args.subordinate_id,
                         mark)
                 ),
-                slave.server)
+                subordinate.server)

-            (g1, g2, g3) = self.gmaster_instantiate_tuple(slave)
-            g1.master.server = brickserver
-            g2.master.server = brickserver
-            g3.master.server = brickserver
+            (g1, g2, g3) = self.gmain_instantiate_tuple(subordinate)
+            g1.main.server = brickserver
+            g2.main.server = brickserver
+            g3.main.server = brickserver
             # bad bad bad: bad way to do things like this
             # need to make this elegant
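The keep-alive loop above stops the subordinate service when no keep-alive message arrives within the configured timeout. The same polling scheme as a standalone function, assuming only an object exposing last_keep_alive as the hunk does:

    import time

    def watch_keep_alive(server, timeout):
        """Return once no keep-alive arrived within `timeout` seconds,
        mirroring the subordinate service loop above."""
        while True:
            last = server.last_keep_alive
            time.sleep(timeout)
            if last == server.last_keep_alive:
                return  # connection inactive; caller shuts the service down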
@@ -1243,8 +1243,8 @@ def linkto_check(cls, e):
                                               rconf.args.local_node,
                                               rconf.args.local_path,
                                               rconf.args.local_node_id,
-                                              rconf.args.master,
-                                              rconf.args.slave)
+                                              rconf.args.main,
+                                              rconf.args.subordinate)
                 status.reset_on_worker_start()
                 rv = changelog_agent.version()
                 if int(rv) != CHANGELOG_AGENT_CLIENT_VERSION:
@@ -1313,7 +1313,7 @@ class SSH(object):

     """scheme class for ssh:// urls

-    interface to remote slave on master side
+    interface to remote subordinate on main side
     implementing an ssh based proxy
     """

@@ -1349,11 +1349,11 @@ def start_fd_client(self, i, o):
             raise GsyncdError(
                 "RePCe major version mismatch: local %s, remote %s" %
                 (exrv, rv))
-        slavepath = "/proc/%d/cwd" % self.server.pid()
-        self.slaveurl = ':'.join([self.remote_addr, slavepath])
+        subordinatepath = "/proc/%d/cwd" % self.server.pid()
+        self.subordinateurl = ':'.join([self.remote_addr, subordinatepath])

     def connect_remote(self):
-        """connect to inner slave url through outer ssh url
+        """connect to inner subordinate url through outer ssh url

         Wrap the connecting utility in ssh.

@@ -1374,7 +1374,7 @@ def connect_remote(self):
                                    self.remote_addr,
                                    self.volume)

-        logging.info("Initializing SSH connection between master and slave...")
+        logging.info("Initializing SSH connection between main and subordinate...")
         t0 = time.time()

         extra_opts = []
@@ -1385,42 +1385,42 @@ def connect_remote(self):
         if gconf.get("use-rsync-xattrs"):
             extra_opts.append('--use-rsync-xattrs')

-        args_to_slave = [gconf.get("ssh-command")] + \
+        args_to_subordinate = [gconf.get("ssh-command")] + \
             gconf.get("ssh-options").split() + \
             ["-p", str(gconf.get("ssh-port"))] + \
             rconf.ssh_ctl_args + [self.remote_addr] + \
-            [remote_gsyncd, "slave"] + \
+            [remote_gsyncd, "subordinate"] + \
             extra_opts + \
-            [rconf.args.master, rconf.args.slave] + \
+            [rconf.args.main, rconf.args.subordinate] + \
             [
-                '--master-node', rconf.args.local_node,
-                '--master-node-id', rconf.args.local_node_id,
-                '--master-brick', rconf.args.local_path,
+                '--main-node', rconf.args.local_node,
+                '--main-node-id', rconf.args.local_node_id,
+                '--main-brick', rconf.args.local_path,
                 '--local-node', rconf.args.resource_remote,
                 '--local-node-id', rconf.args.resource_remote_id] + \
             [
-                # Add all config arguments here, slave gsyncd will not use
-                # config file in slave side, so all overriding options should
+                # Add all config arguments here, subordinate gsyncd will not use
+                # config file in subordinate side, so all overriding options should
                 # be sent as arguments
-                '--slave-timeout', str(gconf.get("slave-timeout")),
-                '--slave-log-level', gconf.get("slave-log-level"),
-                '--slave-gluster-log-level',
-                gconf.get("slave-gluster-log-level"),
-                '--slave-gluster-command-dir',
-                gconf.get("slave-gluster-command-dir")]
+                '--subordinate-timeout', str(gconf.get("subordinate-timeout")),
+                '--subordinate-log-level', gconf.get("subordinate-log-level"),
+                '--subordinate-gluster-log-level',
+                gconf.get("subordinate-gluster-log-level"),
+                '--subordinate-gluster-command-dir',
+                gconf.get("subordinate-gluster-command-dir")]

-        if gconf.get("slave-access-mount"):
-            args_to_slave.append('--slave-access-mount')
+        if gconf.get("subordinate-access-mount"):
+            args_to_subordinate.append('--subordinate-access-mount')

         if rconf.args.debug:
-            args_to_slave.append('--debug')
+            args_to_subordinate.append('--debug')

-        po = Popen(args_to_slave,
+        po = Popen(args_to_subordinate,
                    stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE)
         rconf.transport = po
         self.start_fd_client(po.stdout, po.stdin)
-        logging.info(lf("SSH connection between master and slave established.",
+        logging.info(lf("SSH connection between main and subordinate established.",
                         duration="%.4f" % (time.time() - t0)))

     def rsync(self, files, *args, **kw):
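connect_remote() above assembles the argv that launches gsyncd in subordinate mode on the remote node over SSH. A trimmed sketch of that assembly with the config-override arguments omitted; the parameter names here are illustrative, not the rconf/gconf API:

    def build_subordinate_cmd(ssh_cmd, ssh_opts, port, ctl_args, remote_addr,
                              remote_gsyncd, mainvol, subordinate_url):
        # assemble: ssh [opts] -p PORT [ctl args] user@host \
        #     <remote_gsyncd> subordinate <mainvol> <subordinate_url> ...
        return ([ssh_cmd] + ssh_opts + ["-p", str(port)] + ctl_args +
                [remote_addr, remote_gsyncd, "subordinate",
                 mainvol, subordinate_url])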
@@ -1466,7 +1466,7 @@ def rsync(self, files, *args, **kw):
             gconf.get("rsync-options").split() + \
             extra_rsync_flags + ['.'] + \
             ["-e", " ".join(rsync_ssh_opts)] + \
-            [self.slaveurl]
+            [self.subordinateurl]

         log_rsync_performance = gconf.getr("log-rsync-performance", False)
@@ -1509,7 +1509,7 @@ def rsync(self, files, *args, **kw):

         return po

-    def tarssh(self, files, slaveurl, log_err=False):
+    def tarssh(self, files, subordinateurl, log_err=False):
         """invoke tar+ssh

         -z (compress) can be use if needed, but omitting it now
         as it results in weird error (tar+ssh errors out (errcode: 2)
@@ -1517,7 +1517,7 @@ def tarssh(self, files, slaveurl, log_err=False):
         if not files:
             raise GsyncdError("no files to sync")
         logging.debug("files: " + ", ".join(files))
-        (host, rdir) = slaveurl.split(':')
+        (host, rdir) = subordinateurl.split(':')
         tar_cmd = ["tar"] + \
             ["--sparse", "-cf", "-", "--files-from", "-"]
         ssh_cmd = gconf.get("ssh-command-tar").split() + \
diff --git a/geo-replication/syncdaemon/subcmds.py b/geo-replication/syncdaemon/subcmds.py
index 11d263d..4834651 100644
--- a/geo-replication/syncdaemon/subcmds.py
+++ b/geo-replication/syncdaemon/subcmds.py
@@ -32,15 +32,15 @@ def subcmd_monitor_status(args):
 def subcmd_status(args):
     from gsyncdstatus import GeorepStatus

-    master_name = args.master.replace(":", "")
-    slave_data = args.slave.replace("ssh://", "")
+    main_name = args.main.replace(":", "")
+    subordinate_data = args.subordinate.replace("ssh://", "")

     brick_status = GeorepStatus(gconf.get("state-file"),
                                 "",
                                 args.local_path,
                                 "",
-                                master_name,
-                                slave_data,
+                                main_name,
+                                subordinate_data,
                                 gconf.get("pid-file"))
     checkpoint_time = gconf.get("checkpoint", 0)
     brick_status.print_status(checkpoint_time=checkpoint_time,
@@ -54,9 +54,9 @@ def subcmd_monitor(args):
     monitor.startup(go_daemon)
     Popen.init_errhandler()

-    local = GLUSTER("localhost", args.master)
-    slavehost, slavevol = args.slave.split("::")
-    remote = SSH(slavehost, slavevol)
+    local = GLUSTER("localhost", args.main)
+    subordinatehost, subordinatevol = args.subordinate.split("::")
+    remote = SSH(subordinatehost, subordinatevol)

     return monitor.monitor(local, remote)

@@ -72,9 +72,9 @@ def subcmd_worker(args):
     Popen.init_errhandler()
     fcntl.fcntl(args.feedback_fd, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

-    local = GLUSTER("localhost", args.master)
-    slavehost, slavevol = args.slave.split("::")
-    remote = SSH(slavehost, slavevol)
+    local = GLUSTER("localhost", args.main)
+    subordinatehost, subordinatevol = args.subordinate.split("::")
+    remote = SSH(subordinatehost, subordinatevol)
     remote.connect_remote()
     local.connect()
     logging.info("Worker spawn successful. Acknowledging back to monitor")
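tarssh() above streams a sparse tar archive over SSH instead of using rsync. A self-contained sketch of the two-process pipeline under assumed defaults; the real code takes the remote command from the ssh-command-tar config rather than a bare "ssh":

    import subprocess

    def tar_over_ssh(files, host, rdir):
        """Sparse tar stream on the main side piped into tar -x on the
        subordinate side (illustrative; flags mirror tar_cmd above)."""
        tar = subprocess.Popen(["tar", "--sparse", "-cf", "-",
                                "--files-from", "-"],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        untar = subprocess.Popen(["ssh", host, "tar", "-C", rdir, "-xf", "-"],
                                 stdin=tar.stdout)
        tar.stdin.write("\n".join(files).encode() + b"\n")
        tar.stdin.close()
        tar.stdout.close()  # let untar see EOF when tar exits
        return untar.wait()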
@@ -82,12 +82,12 @@ def subcmd_worker(args):
     local.service_loop(remote)


-def subcmd_slave(args):
+def subcmd_subordinate(args):
     from resource import GLUSTER, Popen
     Popen.init_errhandler()

-    slavevol = args.slave.split("::")[-1]
-    local = GLUSTER("localhost", slavevol)
+    subordinatevol = args.subordinate.split("::")[-1]
+    local = GLUSTER("localhost", subordinatevol)
     local.connect()
     local.service_loop()

@@ -117,28 +117,28 @@ def subcmd_voluuidget(args):
     vix, err = po.communicate()

     if po.returncode != 0:
         logging.info(lf("Volume info failed, unable to get "
-                        "volume uuid of slavevol, "
+                        "volume uuid of subordinatevol, "
                         "returning empty string",
-                        slavevol=args.volname,
-                        slavehost=args.host,
+                        subordinatevol=args.volname,
+                        subordinatehost=args.host,
                         error=po.returncode))
         return ""

     vi = XET.fromstring(vix)
     if vi.find('opRet').text != '0':
-        logging.info(lf("Unable to get volume uuid of slavevol, "
+        logging.info(lf("Unable to get volume uuid of subordinatevol, "
                         "returning empty string",
-                        slavevol=args.volname,
-                        slavehost=args.host,
+                        subordinatevol=args.volname,
+                        subordinatehost=args.host,
                         error=vi.find('opErrstr').text))
         return ""

     try:
         voluuid = vi.find("volInfo/volumes/volume/id").text
     except (ParseError, AttributeError, ValueError) as e:
-        logging.info(lf("Parsing failed to volume uuid of slavevol, "
+        logging.info(lf("Parsing failed to volume uuid of subordinatevol, "
                         "returning empty string",
-                        slavevol=args.volname,
-                        slavehost=args.host,
+                        subordinatevol=args.volname,
+                        subordinatehost=args.host,
                         error=e))
         voluuid = ""

@@ -211,8 +211,8 @@ def subcmd_delete(args):
     for p in args.paths:
         if p != "":
             # set stime to (0,0) to trigger full volume content resync
-            # to slave on session recreation
-            # look at master.py::Xcrawl hint: zero_zero
+            # to subordinate on session recreation
+            # look at main.py::Xcrawl hint: zero_zero
             errno_wrap(Xattr.lsetxattr,
                        (p, stime_xattr_prefix + ".stime",
                         struct.pack("!II", 0, 0)),
diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py
index 93e0975..6dcd659 100644
--- a/geo-replication/syncdaemon/syncdutils.py
+++ b/geo-replication/syncdaemon/syncdutils.py
@@ -175,7 +175,9 @@ def setup_ssh_ctl(ctld, remote_addr, resource_url):
     create_manifest(fname, content)
     ssh_ctl_path = os.path.join(rconf.ssh_ctl_dir,
                                 "%s.sock" % content_sha256)
+    # ControlMaster is an OpenSSH option name, not geo-rep terminology;
+    # renaming it would break SSH connection multiplexing.
     rconf.ssh_ctl_args = ["-oControlMaster=auto", "-S", ssh_ctl_path]


 def grabfile(fname, content=None):
@@ -313,7 +313,7 @@ def log_raise_exception(excont):
                           "errors is most likely due to "
                           "MISCONFIGURATION, please remove all "
                           "the public keys added by geo-replication "
-                          "from authorized_keys file in slave nodes "
+                          "from authorized_keys file in subordinate nodes "
                           "and run Geo-replication create "
                           "command again.")
             logging.error("If `gsec_create container` was used, then "
@@ -617,16 +617,16 @@ def get_changelog_log_level(lvl):
     return getattr(GlusterLogLevel, lvl, GlusterLogLevel.INFO)


-def get_master_and_slave_data_from_args(args):
-    master_name = None
-    slave_data = None
+def get_main_and_subordinate_data_from_args(args):
+    main_name = None
+    subordinate_data = None
     for arg in args:
         if arg.startswith(":"):
-            master_name = arg.replace(":", "")
+            main_name = arg.replace(":", "")
         if "::" in arg:
-            slave_data = arg.replace("ssh://", "")
+            subordinate_data = arg.replace("ssh://", "")

-    return (master_name, slave_data)
+    return (main_name, subordinate_data)


 def unshare_propagation_supported():
     global unshare_mnt_propagation
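get_main_and_subordinate_data_from_args() above is small enough to pin down exactly; a usage example with a hypothetical session (the ":<volname>" and "<host>::<volname>" argument shapes follow gsyncd's positional-argument convention):

    # The main volume is passed as ":<volname>" and the subordinate as
    # "<host>::<volname>", optionally with an ssh:// prefix that is stripped.
    args = [":gvol-main", "ssh://geoaccount@rhost::gvol-backup"]
    main_name, subordinate_data = get_main_and_subordinate_data_from_args(args)
    # main_name        == "gvol-main"
    # subordinate_data == "geoaccount@rhost::gvol-backup"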
@@ -937,17 +937,17 @@ class VolinfoFromGconf(object):
     # Volinfo object API/interface kept as is so that caller need not
     # change anything except calling this instead of Volinfo()
     #
-    # master-bricks=
-    # master-bricks=NODEID:HOSTNAME:PATH,..
-    # slave-bricks=NODEID:HOSTNAME,..
-    # master-volume-id=
-    # slave-volume-id=
-    # master-replica-count=
-    # master-disperse_count=
-    def __init__(self, vol, host='localhost', master=True):
+    # main-bricks=
+    # main-bricks=NODEID:HOSTNAME:PATH,..
+    # subordinate-bricks=NODEID:HOSTNAME,..
+    # main-volume-id=
+    # subordinate-volume-id=
+    # main-replica-count=
+    # main-disperse_count=
+    def __init__(self, vol, host='localhost', main=True):
         self.volume = vol
         self.host = host
-        self.master = master
+        self.main = main

     def is_tier(self):
         return False
@@ -958,7 +958,7 @@ def is_hot(self, brickpath):
     @property
     @memoize
     def bricks(self):
-        pfx = "master-" if self.master else "slave-"
+        pfx = "main-" if self.main else "subordinate-"
         bricks_data = gconf.get(pfx + "bricks")
         if bricks_data is None:
             return []
@@ -976,16 +976,16 @@ def bricks(self):
     @property
     @memoize
     def uuid(self):
-        if self.master:
-            return gconf.get("master-volume-id")
+        if self.main:
+            return gconf.get("main-volume-id")
         else:
-            return gconf.get("slave-volume-id")
+            return gconf.get("subordinate-volume-id")

     def replica_count(self, tier, hot):
-        return gconf.get("master-replica-count")
+        return gconf.get("main-replica-count")

     def disperse_count(self, tier, hot):
-        return gconf.get("master-disperse-count")
+        return gconf.get("main-disperse-count")

     @property
     @memoize
diff --git a/geo-replication/tests/unit/test_gsyncdstatus.py b/geo-replication/tests/unit/test_gsyncdstatus.py
index b4ce8fa..5e257b3 100644
--- a/geo-replication/tests/unit/test_gsyncdstatus.py
+++ b/geo-replication/tests/unit/test_gsyncdstatus.py
@@ -55,7 +55,7 @@ def test_set_monitor_status(self):

     def test_default_values_test(self):
         self.assertTrue(get_default_values(), {
-            "slave_node": DEFAULT_STATUS,
+            "subordinate_node": DEFAULT_STATUS,
             "worker_status": DEFAULT_STATUS,
             "last_synced": 0,
             "last_synced_utc": 0,
@@ -98,15 +98,15 @@ def test_crawl_status(self):
             self.status.set_worker_crawl_status(st)
             self.assertTrue(self.status.get_status()["crawl_status"], st)

-    def test_slave_node(self):
+    def test_subordinate_node(self):
         set_monitor_status(self.monitor_status_file, "Started")
         self.status.set_active()
-        self.status.set_slave_node("fvm2")
-        self.assertTrue(self.status.get_status()["slave_node"], "fvm2")
+        self.status.set_subordinate_node("fvm2")
+        self.assertTrue(self.status.get_status()["subordinate_node"], "fvm2")

         self.status.set_worker_status("Passive")
-        self.status.set_slave_node("fvm2")
-        self.assertTrue(self.status.get_status()["slave_node"], "fvm2")
+        self.status.set_subordinate_node("fvm2")
+        self.assertTrue(self.status.get_status()["subordinate_node"], "fvm2")

     def test_active_worker_status(self):
         set_monitor_status(self.monitor_status_file, "Started")
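VolinfoFromGconf.bricks above reads the main-bricks value in the NODEID:HOSTNAME:PATH,.. format documented in the hunk; the actual splitting code is elided from this diff, so the parser below is an assumed equivalent (the uuid/host/dir keys match how distribute() consumes bricks):

    def parse_bricks(value):
        """Split a main-bricks value (NODEID:HOSTNAME:PATH,..) into the
        dicts distribute() consumes (uuid/host/dir keys)."""
        bricks = []
        for item in value.split(","):
            node_id, host, path = item.split(":", 2)
            bricks.append({"uuid": node_id, "host": host, "dir": path})
        return bricks

    parse_bricks("uuid-1:node1:/bricks/b1,uuid-2:node2:/bricks/b2")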