#ifndef FIO_IOLOG_H
#define FIO_IOLOG_H
#include <stdio.h>
#include "lib/rbtree.h"
#include "lib/ieee754.h"
#include "flist.h"
#include "ioengines.h"
/*
 * Used for maintaining running statistics: min/max, sample count, and
 * an online mean, with S as the running sum of squared deviations used
 * for the standard deviation calculation.
 */
struct io_stat {
uint64_t max_val;
uint64_t min_val;
uint64_t samples;
fio_fp64_t mean;
fio_fp64_t S;
};
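/*
 * Bookkeeping for the windowed latency histograms (see hist_window and
 * hist_msec in struct io_log below).
 */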
struct io_hist {
uint64_t samples;
unsigned long hist_last;
struct flist_head list;
};
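/*
 * Payload of a single sample: either a plain value, or a pointer to a
 * latency histogram entry for histogram logs. Built with the
 * sample_val() and sample_plat() helpers below.
 */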
union io_sample_data {
uint64_t val;
struct io_u_plat_entry *plat_entry;
};
#define sample_val(value) ((union io_sample_data) { .val = value })
#define sample_plat(plat) ((union io_sample_data) { .plat_entry = plat })
/*
* A single data sample
*/
struct io_sample {
uint64_t time;
union io_sample_data data;
uint32_t __ddir;
uint16_t priority;
uint64_t bs;
};
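/*
 * Sample variant that also carries the I/O offset; used when offset
 * logging is enabled (see log_offset and __log_entry_sz()).
 */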
struct io_sample_offset {
struct io_sample s;
uint64_t offset;
};
enum {
IO_LOG_TYPE_LAT = 1,
IO_LOG_TYPE_CLAT,
IO_LOG_TYPE_SLAT,
IO_LOG_TYPE_BW,
IO_LOG_TYPE_IOPS,
IO_LOG_TYPE_HIST,
};
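/*
 * Default and maximum number of samples held by a single io_logs chunk
 */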
#define DEF_LOG_ENTRIES 1024
#define MAX_LOG_ENTRIES (1024 * DEF_LOG_ENTRIES)
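/*
 * One chunk of samples; an io_log keeps a list of these and grows it as
 * samples accumulate (see regrow_logs()).
 */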
struct io_logs {
struct flist_head list;
uint64_t nr_samples;
uint64_t max_samples;
void *log;
};
/*
* Dynamically growing data sample log
*/
struct io_log {
/*
* Entries already logged
*/
struct flist_head io_logs;
uint32_t cur_log_max;
/*
* When the current log runs out of space, store events here until
* we have a chance to regrow
*/
struct io_logs *pending;
unsigned int log_ddir_mask;
char *filename;
struct thread_data *td;
unsigned int log_type;
/*
* If we fail extending the log, stop collecting more entries.
*/
bool disabled;
/*
* Log offsets
*/
unsigned int log_offset;
/*
* Log I/O priorities
*/
unsigned int log_prio;
/*
* Max size of log entries before a chunk is compressed
*/
unsigned int log_gz;
/*
* Don't deflate for storing, just store the compressed bits
*/
unsigned int log_gz_store;
	/*
	 * Windowed averaging, for logging a single averaged entry over
	 * some period of time (avg_msec).
	 */
struct io_stat avg_window[DDIR_RWDIR_CNT];
unsigned long avg_msec;
unsigned long avg_last[DDIR_RWDIR_CNT];
/*
* Windowed latency histograms, for keeping track of when we need to
* save a copy of the histogram every approximately hist_msec
* milliseconds.
*/
struct io_hist hist_window[DDIR_RWDIR_CNT];
unsigned long hist_msec;
unsigned int hist_coarseness;
pthread_mutex_t chunk_lock;
unsigned int chunk_seq;
struct flist_head chunk_list;
pthread_mutex_t deferred_free_lock;
#define IOLOG_MAX_DEFER 8
void *deferred_items[IOLOG_MAX_DEFER];
unsigned int deferred;
};
/*
* If the upper bit is set, then we have the offset as well
*/
#define LOG_OFFSET_SAMPLE_BIT 0x80000000U
/*
* If the bit following the upper bit is set, then we have the priority
*/
#define LOG_PRIO_SAMPLE_BIT 0x40000000U
#define LOG_SAMPLE_BITS (LOG_OFFSET_SAMPLE_BIT | LOG_PRIO_SAMPLE_BIT)
#define io_sample_ddir(io) ((io)->__ddir & ~LOG_SAMPLE_BITS)
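/*
 * A sample's __ddir field holds the data direction in the low bits and
 * the LOG_*_SAMPLE_BIT flags (taken from log->log_ddir_mask) in the top
 * bits, recording whether offsets and priorities were logged.
 */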
static inline void io_sample_set_ddir(struct io_log *log,
struct io_sample *io,
enum fio_ddir ddir)
{
io->__ddir = ddir | log->log_ddir_mask;
}
static inline size_t __log_entry_sz(int log_offset)
{
if (log_offset)
return sizeof(struct io_sample_offset);
else
return sizeof(struct io_sample);
}
static inline size_t log_entry_sz(struct io_log *log)
{
return __log_entry_sz(log->log_offset);
}
static inline size_t log_sample_sz(struct io_log *log, struct io_logs *cur_log)
{
return cur_log->nr_samples * log_entry_sz(log);
}
static inline struct io_sample *__get_sample(void *samples, int log_offset,
uint64_t sample)
{
uint64_t sample_offset = sample * __log_entry_sz(log_offset);
return (struct io_sample *) ((char *) samples + sample_offset);
}
struct io_logs *iolog_cur_log(struct io_log *);
uint64_t iolog_nr_samples(struct io_log *);
void regrow_logs(struct thread_data *);
void regrow_agg_logs(void);
static inline struct io_sample *get_sample(struct io_log *iolog,
struct io_logs *cur_log,
uint64_t sample)
{
return __get_sample(cur_log->log, iolog->log_offset, sample);
}
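/*
 * Illustrative sketch only (not part of this header's API): walking the
 * samples of one chunk and decoding each entry:
 *
 *	uint64_t i;
 *
 *	for (i = 0; i < cur_log->nr_samples; i++) {
 *		struct io_sample *s = get_sample(log, cur_log, i);
 *		enum fio_ddir ddir = io_sample_ddir(s);
 *
 *		if (log->log_offset) {
 *			struct io_sample_offset *so = (void *) s;
 *			// so->offset holds the I/O offset of this sample
 *		}
 *	}
 */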
enum {
	IP_F_ONRB = 1,		/* io_piece is stored on the rb tree */
	IP_F_ONLIST = 2,	/* io_piece is stored on the plain list */
	IP_F_TRIMMED = 4,	/* the block was trimmed */
	IP_F_IN_FLIGHT = 8,	/* the originating io_u is still in flight */
};
/*
* When logging io actions, this matches a single sent io_u
*/
struct io_piece {
union {
struct fio_rb_node rb_node;
struct flist_head list;
};
struct flist_head trim_list;
union {
int fileno;
struct fio_file *file;
};
unsigned long long offset;
unsigned short numberio;
unsigned long len;
unsigned int flags;
enum fio_ddir ddir;
unsigned long delay;
unsigned int file_action;
};
/*
* Log exports
*/
enum file_log_act {
FIO_LOG_ADD_FILE,
FIO_LOG_OPEN_FILE,
FIO_LOG_CLOSE_FILE,
FIO_LOG_UNLINK_FILE,
};
struct io_u;
extern int __must_check read_iolog_get(struct thread_data *, struct io_u *);
extern void log_io_u(const struct thread_data *, const struct io_u *);
extern void log_file(struct thread_data *, struct fio_file *, enum file_log_act);
extern bool __must_check init_iolog(struct thread_data *td);
extern void log_io_piece(struct thread_data *, struct io_u *);
extern void unlog_io_piece(struct thread_data *, struct io_u *);
extern void trim_io_piece(const struct io_u *);
extern void queue_io_piece(struct thread_data *, struct io_piece *);
extern void prune_io_piece_log(struct thread_data *);
extern void write_iolog_close(struct thread_data *);
int64_t iolog_items_to_fetch(struct thread_data *td);
extern int iolog_compress_init(struct thread_data *, struct sk_out *);
extern void iolog_compress_exit(struct thread_data *);
extern size_t log_chunk_sizes(struct io_log *);
extern int init_io_u_buffers(struct thread_data *);
extern unsigned long long delay_since_ttime(const struct thread_data *,
unsigned long long);
#ifdef CONFIG_ZLIB
extern int iolog_file_inflate(const char *);
#endif
/*
* Logging
*/
struct log_params {
struct thread_data *td;
unsigned long avg_msec;
unsigned long hist_msec;
int hist_coarseness;
int log_type;
int log_offset;
int log_prio;
int log_gz;
int log_gz_store;
int log_compress;
};
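/*
 * Illustrative sketch only: filling in log_params for a completion
 * latency log that averages samples over 500 msec windows, then handing
 * it to setup_log() (declared below). The file name is made up.
 *
 *	struct io_log *log = NULL;
 *	struct log_params p = {
 *		.td = td,
 *		.avg_msec = 500,
 *		.log_type = IO_LOG_TYPE_CLAT,
 *	};
 *
 *	setup_log(&log, &p, "clat.log");
 */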
/*
 * True when samples are logged per I/O rather than averaged over
 * avg_msec windows; compressed logs are always handled per unit.
 */
static inline bool per_unit_log(struct io_log *log)
{
	return log && (!log->avg_msec || log->log_gz || log->log_gz_store);
}
static inline bool inline_log(struct io_log *log)
{
return log->log_type == IO_LOG_TYPE_LAT ||
log->log_type == IO_LOG_TYPE_CLAT ||
log->log_type == IO_LOG_TYPE_SLAT;
}
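/*
 * Align a replayed offset down to replay_align, which is assumed to be
 * a power of two; a no-op if no alignment was requested.
 */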
static inline void ipo_bytes_align(unsigned int replay_align, struct io_piece *ipo)
{
if (!replay_align)
return;
ipo->offset &= ~(replay_align - (uint64_t)1);
}
extern void finalize_logs(struct thread_data *td, bool);
extern void setup_log(struct io_log **, struct log_params *, const char *);
extern void flush_log(struct io_log *, bool);
extern void flush_samples(FILE *, void *, uint64_t);
extern uint64_t hist_sum(int, int, uint64_t *, uint64_t *);
extern void free_log(struct io_log *);
extern void fio_writeout_logs(bool);
extern void td_writeout_logs(struct thread_data *, bool);
extern int iolog_cur_flush(struct io_log *, struct io_logs *);
static inline void init_ipo(struct io_piece *ipo)
{
INIT_FLIST_HEAD(&ipo->list);
INIT_FLIST_HEAD(&ipo->trim_list);
}
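/*
 * One chunk of compressed log data, kept on io_log->chunk_list in
 * sequence order until it is flushed out.
 */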
struct iolog_compress {
struct flist_head list;
void *buf;
size_t len;
unsigned int seq;
};
#endif