def setup_log(self, name='train', log_dir=None, file_name=None):
    # Create the logger only once; repeated calls reuse the existing one.
    if not self.logger:
        self.logger = get_logger(
            name, log_dir, distributed_rank=0,
            filename=file_name)  # TODO: pass self.args.local_rank instead of 0?
    else:
        self.logger.warning('logger already exists')
    return self.logger
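# Usage sketch (an assumption, not verified against the rest of the repo):
# assumes this method belongs to an engine/trainer class whose __init__ sets
# `self.logger = None`, and that `get_logger` is imported from the project's
# logging utilities.
#
#     engine = Engine()  # hypothetical class name
#     logger = engine.setup_log(name='train', log_dir='./logs',
#                               file_name='train.log')
#     logger.info('logging initialized')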
# def inject_default_parser(self):
#     p = self.parser
#     p.add_argument(
#         '-d', '--devices', default='0',
#         help='device id(s) to use for data-parallel training')
#     p.add_argument(
#         '-c', '--continue', type=extant_file, metavar='FILE',
#         dest='continue_fpath',
#         help='resume training from a given checkpoint')
#     p.add_argument(
#         '--local_rank', default=0, type=int,
#         help='process rank on the node')
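# For reference, a minimal sketch of what `get_logger` might look like
# (hypothetical; the real helper is defined elsewhere in the repo). Only
# rank 0 attaches handlers, so distributed workers do not duplicate output:
#
#     import logging
#     import os
#
#     def get_logger(name, log_dir=None, distributed_rank=0, filename=None):
#         logger = logging.getLogger(name)
#         logger.setLevel(logging.INFO)
#         if distributed_rank > 0:  # non-zero ranks stay silent
#             return logger
#         formatter = logging.Formatter(
#             '%(asctime)s %(name)s %(levelname)s: %(message)s')
#         stream_handler = logging.StreamHandler()
#         stream_handler.setFormatter(formatter)
#         logger.addHandler(stream_handler)
#         if log_dir and filename:
#             os.makedirs(log_dir, exist_ok=True)
#             file_handler = logging.FileHandler(
#                 os.path.join(log_dir, filename))
#             file_handler.setFormatter(formatter)
#             logger.addHandler(file_handler)
#         return logger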