Changeset 397f56dda527bb85b65aa7a4ab60c1f289361565

Timestamp: 04/09/12 20:54:24
Author: Theo Schlossnagle <jesus@omniti.com>
git-committer: Theo Schlossnagle <jesus@omniti.com> 1334004864 +0000
git-parent: [de520024f8801f5cc271ac9301e66f7c3690558e]
git-author: Theo Schlossnagle <jesus@omniti.com> 1334004864 +0000
Message:

The spread of checks across time was bunched toward the beginning of
each second for no sound reason. More dastardly, every DNS-based check
could coalesce over time around the point of a DNS timeout. Now checks
stay true to their originally set period, anchored to their initial
schedule time.

This can be seen at runtime via the console command: "show timing_slots"

DNS-based checks still suffer from an immediate fire "at boot".
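
A minimal standalone sketch (plain C, not the committed code) of the
period-aligned rescheduling the message describes; next_fire_after() and
its millisecond arguments are illustrative names, not part of the noit API:

    /* Sketch only: derive the next fire time from the check's initial
     * schedule time rather than from the (possibly late) last fire, so
     * late runs (e.g. DNS timeouts) do not shift the schedule. */
    #include <stdio.h>
    #include <stdint.h>

    static uint64_t
    next_fire_after(uint64_t initial_ms, uint64_t period_ms, uint64_t now_ms) {
      uint64_t diff_ms = now_ms - initial_ms;   /* elapsed since initial schedule */
      /* next period-aligned point strictly past "now" */
      uint64_t offset_ms = ((diff_ms / period_ms) + 1) * period_ms;
      return initial_ms + offset_ms;
    }

    int main(void) {
      /* scheduled at t=1000ms with a 60000ms period; the last run finished
       * late at t=75500ms -- the next fire still lands on the original grid */
      printf("%llu\n", (unsigned long long)next_fire_after(1000, 60000, 75500));
      /* prints 121000 (1000 + 2*60000) instead of drifting to 75500 + 60000 */
      return 0;
    }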

Files:

  • src/noit_check.c

    rbc131e5 r397f56d

    @@ -75,4 +75,5 @@
     /* 20 ms slots over 60 second for distribution */
     #define SCHEDULE_GRANULARITY 20
    +#define SLOTS_PER_SECOND (1000/SCHEDULE_GRANULARITY)
     #define MAX_MODULE_REGISTRATIONS 64
     
     
    @@ -123,5 +124,5 @@
     static int
     check_slots_find_smallest(int sec) {
    -  int i, j, jbase = 0, mini = 0, minj = 0;
    +  int i, j, cyclic, random_offset, jbase = 0, mini = 0, minj = 0;
       unsigned short min_running_i = 0xffff, min_running_j = 0xffff;
       for(i=0;i<60;i++) {
     
    @@ -133,5 +134,7 @@
       }
       jbase = mini * (1000/SCHEDULE_GRANULARITY);
    -  for(j=jbase;j<jbase+(1000/SCHEDULE_GRANULARITY);j++) {
    +  random_offset = drand48() * SLOTS_PER_SECOND;
    +  for(cyclic=0;cyclic<SLOTS_PER_SECOND;cyclic++) {
    +    j = jbase + ((random_offset + cyclic) % SLOTS_PER_SECOND);
         if(check_slots_count[j] < min_running_j) {
           min_running_j = check_slots_count[j];
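
    A minimal standalone sketch of the randomized slot scan added above (the
    array and function names here are illustrative; the committed code uses
    check_slots_count and check_slots_find_smallest): instead of always
    scanning the second's 50 twenty-millisecond slots from slot 0, the scan
    starts at a random offset and wraps, so ties between equally loaded
    slots no longer pile up at the beginning of the second.

        #include <stdlib.h>

        #define SCHEDULE_GRANULARITY 20
        #define SLOTS_PER_SECOND (1000/SCHEDULE_GRANULARITY)

        /* per-slot check counts (illustrative stand-in) */
        static unsigned short slot_count[60 * SLOTS_PER_SECOND];

        /* return the index of the least-loaded 20ms slot within `second`,
         * breaking ties at a random starting point rather than at slot 0 */
        static int
        least_loaded_slot(int second) {
          int cyclic, j, minj = -1;
          unsigned short min_running = 0xffff;
          int jbase = second * SLOTS_PER_SECOND;
          int random_offset = (int)(drand48() * SLOTS_PER_SECOND);
          for(cyclic = 0; cyclic < SLOTS_PER_SECOND; cyclic++) {
            j = jbase + ((random_offset + cyclic) % SLOTS_PER_SECOND);
            if(slot_count[j] < min_running) {
              min_running = slot_count[j];
              minj = j;
            }
          }
          return minj;
        }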
  • src/noit_check.h

    r204ecdc r397f56d

    @@ -157,4 +157,5 @@
       void **module_metadata;
       noit_hash_table **module_configs;
    +  struct timeval initial_schedule_time;
     } noit_check_t;
     
  • src/noit_check_tools.c

    r46b9cf0 r397f56d

    @@ -62,8 +62,9 @@
                                   struct timeval *now) {
       recur_closure_t *rcl = closure;
    +  int ms;
       rcl->check->fire_event = NULL; /* This is us, we get free post-return */
       noit_check_resolve(rcl->check);
    -  noit_check_schedule_next(rcl->self, &e->whence, rcl->check, now,
    -                           rcl->dispatch, NULL);
    +  ms = noit_check_schedule_next(rcl->self, NULL, rcl->check, now,
    +                                rcl->dispatch, NULL);
       if(NOIT_CHECK_RESOLVED(rcl->check)) {
         if(NOIT_HOOK_CONTINUE ==
     
    @@ -75,5 +76,10 @@
                                 rcl->check->target);
           }
    -      rcl->dispatch(rcl->self, rcl->check, rcl->cause);
    +      if(ms < rcl->check->timeout)
    +        noitL(noit_error, "skipping %s, can't finish in %dms (timeout %dms)\n",
    +              rcl->check->name, ms, rcl->check->timeout);
    +      else {
    +        rcl->dispatch(rcl->self, rcl->check, rcl->cause);
    +      }
         }
         check_postflight_hook_invoke(rcl->self, rcl->check, rcl->cause);
     
    @@ -92,5 +98,6 @@
                              noit_check_t *cause) {
       eventer_t newe;
    -  struct timeval period, earliest;
    +  struct timeval period, earliest, diff;
    +  u_int64_t diffms, periodms, offsetms;
       recur_closure_t *rcl;
     
     
    @@ -98,6 +105,14 @@
       assert(check->fire_event == NULL);
       if(check->period == 0) return 0;
    +
    +  /* if last_check is not passed, we use the initial_schedule_time
    +   * otherwise, we set the initial_schedule_time
    +   */
    +  if(!last_check) last_check = &check->initial_schedule_time;
    +  else memcpy(&check->initial_schedule_time, last_check, sizeof(*last_check));
    +
       if(NOIT_CHECK_DISABLED(check) || NOIT_CHECK_KILLED(check)) {
         if(!(check->flags & NP_TRANSIENT)) check_slots_dec_tv(last_check);
    +    memset(&check->initial_schedule_time, 0, sizeof(struct timeval));
         return 0;
       }
     
    @@ -123,10 +138,22 @@
         period.tv_usec = (check->period % 1000) * 1000;
       }
    +  periodms = period.tv_sec * 1000 + period.tv_usec / 1000;
     
       newe = eventer_alloc();
    +  sub_timeval(earliest, *last_check, &diff);
    +  /* calculate the difference between the initial schedule time and "now" */
    +  diffms = diff.tv_sec * 1000 + diff.tv_usec / 1000;
    +  /* determine the offset from initial schedule time that would place
    +   * us at the next period-aligned point past "now" */
    +  offsetms = ((diffms / periodms) + 1) * periodms;
    +  diff.tv_sec = offsetms / 1000;
    +  diff.tv_usec = (offsetms % 1000) * 1000;
    +
       memcpy(&newe->whence, last_check, sizeof(*last_check));
    -  add_timeval(newe->whence, period, &newe->whence);
    -  if(compare_timeval(newe->whence, earliest) < 0)
    -    memcpy(&newe->whence, &earliest, sizeof(earliest));
    +  add_timeval(newe->whence, diff, &newe->whence);
    +
    +  sub_timeval(newe->whence, earliest, &diff);
    +  diffms = (int)diff.tv_sec * 1000 + (int)diff.tv_usec / 1000;
    +  assert(compare_timeval(newe->whence, earliest) > 0);
       newe->mask = EVENTER_TIMER;
       newe->callback = noit_check_recur_handler;
     
    @@ -140,5 +167,5 @@
       eventer_add(newe);
       check->fire_event = newe;
    -  return 0;
    +  return diffms;
     }
     
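
    A small standalone sketch of the dispatch guard introduced above;
    maybe_dispatch() and the check name are hypothetical stand-ins.
    noit_check_schedule_next() now returns the milliseconds until the next
    scheduled fire, and the recur handler skips a check that cannot finish
    within that window.

        #include <stdio.h>

        /* Sketch only: if the next fire is closer than the check's timeout,
         * log and skip this run rather than letting it overlap the next period. */
        static void
        maybe_dispatch(const char *name, int ms_until_next_fire, int timeout_ms) {
          if(ms_until_next_fire < timeout_ms)
            fprintf(stderr, "skipping %s, can't finish in %dms (timeout %dms)\n",
                    name, ms_until_next_fire, timeout_ms);
          else
            printf("dispatching %s\n", name); /* stands in for rcl->dispatch(...) */
        }

        int main(void) {
          maybe_dispatch("http_check", 1500, 5000);  /* skipped: only 1.5s until next fire */
          maybe_dispatch("http_check", 58000, 5000); /* dispatched: plenty of time */
          return 0;
        }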