diff --git a/scripts/elfgames/go/console_lib.py b/scripts/elfgames/go/console_lib.py
index f461673d..dab26581 100644
--- a/scripts/elfgames/go/console_lib.py
+++ b/scripts/elfgames/go/console_lib.py
@@ -334,7 +334,7 @@ def check_player(self, batch, player):
         return True, None
 
     def print_msg(self, ret, msg):
-        print("\n%s %s\n\n" % (("=" if ret else "?"), msg))
+        print("%s %s\n" % (("=" if ret else "?"), msg))
 
     def prompt(self, prompt_str, batch):
         # Show last command results.
diff --git a/scripts/elfgames/go/df_console.py b/scripts/elfgames/go/df_console.py
index be65572c..7199e5f8 100644
--- a/scripts/elfgames/go/df_console.py
+++ b/scripts/elfgames/go/df_console.py
@@ -9,6 +9,8 @@
 import os
 import sys
 
+sys.path.append( os.path.expanduser('~/anaconda3/lib/python3.6/site-packages') )
+
 import torch
 
 from console_lib import GoConsoleGTP
@@ -16,10 +18,10 @@
 
 
 def main():
-    # print('Python version:', sys.version)
-    # print('PyTorch version:', torch.__version__)
-    # print('CUDA version', torch.version.cuda)
-    # print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""))
+    print('Python version:', sys.version, file=sys.stderr)
+    print('PyTorch version:', torch.__version__, file=sys.stderr)
+    print('CUDA version', torch.version.cuda, file=sys.stderr)
+    print('Conda env:', os.environ.get("CONDA_DEFAULT_ENV", ""), file=sys.stderr)
 
     additional_to_load = {
         'evaluator': (
diff --git a/src_py/elf/utils_elf.py b/src_py/elf/utils_elf.py
index c398c0ea..908b031a 100644
--- a/src_py/elf/utils_elf.py
+++ b/src_py/elf/utils_elf.py
@@ -34,7 +34,7 @@ def _alloc(p, gpu, use_numpy=True):
     type_name = p.field().type_name()
     sz = p.field().sz().vec()
 
-    # print(name, type_name, sz)
+    print(name, type_name, sz, file=sys.stderr)
 
     if not use_numpy:
         v = Allocator.torch_types[type_name](*sz)
@@ -63,7 +63,7 @@ def spec2batches(ctx, batchsize, spec, gpu, use_numpy=False, num_recv=1):
     idx2name = dict()
 
     for name, v in spec.items():
-        # print("%s: %s" % (name, v))
+        print("%s: %s" % (name, v), file=sys.stderr)
         # TODO this might not good since it changes the input.
         if "input" not in v or v["input"] is None:
             v["input"] = []
@@ -74,7 +74,7 @@ def spec2batches(ctx, batchsize, spec, gpu, use_numpy=False, num_recv=1):
         this_batchsize = v.get("batchsize", batchsize)
         keys = list(set(v["input"] + v["reply"]))
 
-        # print("SharedMem: \"%s\", keys: %s" % (name, str(keys)))
+        print("SharedMem: \"%s\", keys: %s" % (name, str(keys)), file=sys.stderr)
 
         smem_opts = ctx.createSharedMemOptions(name, this_batchsize)
         smem_opts.setTimeout(v.get("timeout_usec", 0))
diff --git a/src_py/rlpytorch/model_loader.py b/src_py/rlpytorch/model_loader.py
index c4040ee1..7a5553d9 100644
--- a/src_py/rlpytorch/model_loader.py
+++ b/src_py/rlpytorch/model_loader.py
@@ -10,6 +10,7 @@
 import time
 import torch
 import warnings
+import sys
 
 from elf.options import import_options, PyOptionSpec
 from elf import logging
@@ -25,7 +26,7 @@
 
 def load_module(mod):
     """Load a python module."""
     module = importlib.import_module(mod)
-    # print(module, mod)
+    print(module, mod, file=sys.stderr)
     return module
 