# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

# coding: utf-8
"""TensorBoard callback functions that can be used to log training status during an epoch."""
from __future__ import absolute_import

import logging


class LogMetricsCallback(object):
    """Log metrics periodically to TensorBoard.

    This callback works almost the same as `callback.Speedometer`, but writes a
    TensorBoard event file for visualization. For more usage details, please refer to
    https://github.com/dmlc/tensorboard

    Parameters
    ----------
    logging_dir : str
        Directory for the TensorBoard event files. Once events have been written,
        use `tensorboard --logdir=path/to/logs` to launch the TensorBoard visualization.
    prefix : str
        Prefix for the name of a `scalar` metric value.
        You might want to use this parameter to leverage the TensorBoard plot feature,
        where TensorBoard plots different curves in one graph when they have the same `name`.
        The following example shows how to compare a training and an evaluation metric
        in the same graph.

    Examples
    --------
    >>> # Log training and evaluation metrics under different directories.
    >>> training_log = 'logs/train'
    >>> evaluation_log = 'logs/eval'
    >>> # In this case, each training and evaluation metric pair has the same name;
    >>> # you can add a prefix to separate them.
    >>> batch_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(training_log)]
    >>> eval_end_callbacks = [mx.contrib.tensorboard.LogMetricsCallback(evaluation_log)]
    >>> # run
    >>> model.fit(train,
    >>>     ...
    >>>     batch_end_callback=batch_end_callbacks,
    >>>     eval_end_callback=eval_end_callbacks)
    >>> # Then use `tensorboard --logdir=logs/` to launch the TensorBoard visualization.
    """
    def __init__(self, logging_dir, prefix=None):
        self.prefix = prefix
        try:
            from mxboard import SummaryWriter
            self.summary_writer = SummaryWriter(logging_dir)
        except ImportError:
            logging.error('You can install mxboard via `pip install mxboard`.')
            # Re-raise so the missing dependency is reported here rather than as an
            # AttributeError the first time the callback is invoked.
            raise

    def __call__(self, param):
        """Callback to log training speed and metrics in TensorBoard."""
        if param.eval_metric is None:
            return
        name_value = param.eval_metric.get_name_value()
        for name, value in name_value:
            if self.prefix is not None:
                name = '%s-%s' % (self.prefix, name)
            self.summary_writer.add_scalar(name, value, global_step=param.epoch)
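

# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): drives the callback by hand
# so its behaviour can be checked without a full `model.fit` run. It assumes an
# MXNet 1.x layout where `mx.model.BatchEndParam` and `mx.metric.Accuracy` are
# available, that mxboard is installed, and that 'logs/train' is just an
# illustrative log directory.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import mxnet as mx

    # Build a dummy metric from one batch of labels/predictions so that
    # get_name_value() returns a meaningful (name, value) pair.
    metric = mx.metric.Accuracy()
    metric.update(labels=[mx.nd.array([1, 0, 1])],
                  preds=[mx.nd.array([[0.3, 0.7], [0.8, 0.2], [0.1, 0.9]])])

    # Each invocation writes one scalar per metric, tagged with the prefix,
    # at global_step equal to param.epoch.
    callback = LogMetricsCallback('logs/train', prefix='train')
    param = mx.model.BatchEndParam(epoch=0, nbatch=10,
                                   eval_metric=metric, locals=None)
    callback(param)  # writes a 'train-accuracy' scalar for epoch 0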