author     Cem Keylan <cem@ckyln.com>  2021-03-09 08:43:43 +0300
committer  Cem Keylan <cem@ckyln.com>  2021-03-09 08:43:43 +0300
commit     7ccc9c2432a847b85da5f445977d09f014cbc75b (patch)
tree       8067d45ebeea60f1ddaa8167cb1398e2b6631473 /networking
parent     fef526c380e35cbdc0bdb8375487756c42659a75 (diff)
parent     307cd26e9893ed0cf6ee88e7fca2d61d3da0e139 (diff)
download   busybox-7ccc9c2432a847b85da5f445977d09f014cbc75b.tar.gz
Diffstat (limited to 'networking')
-rw-r--r--  networking/dnsd.c            |  17
-rw-r--r--  networking/ntpd.c            | 337
-rw-r--r--  networking/udhcp/d6_dhcpc.c  |  14
-rw-r--r--  networking/udhcp/dhcpc.c     |  36
-rw-r--r--  networking/udhcp/dhcpd.c     |   4
5 files changed, 113 insertions, 295 deletions
diff --git a/networking/dnsd.c b/networking/dnsd.c index 0ff0290fb..a0f320c6c 100644 --- a/networking/dnsd.c +++ b/networking/dnsd.c @@ -379,7 +379,8 @@ Domain name in a message can be represented as either: */ static int process_packet(struct dns_entry *conf_data, uint32_t conf_ttl, - uint8_t *buf) + uint8_t *buf, + unsigned buflen) { struct dns_head *head; struct type_and_class *unaligned_type_class; @@ -402,9 +403,6 @@ static int process_packet(struct dns_entry *conf_data, bb_simple_error_msg("response packet, ignored"); return 0; /* don't reply */ } - /* QR = 1 "response", RCODE = 4 "Not Implemented" */ - outr_flags = htons(0x8000 | 4); - err_msg = NULL; /* start of query string */ query_string = (void *)(head + 1); @@ -416,6 +414,15 @@ static int process_packet(struct dns_entry *conf_data, /* where to append answer block */ answb = (void *)(unaligned_type_class + 1); + if (buflen < answb - buf) { + bb_simple_error_msg("packet too short"); + return 0; /* don't reply */ + } + + /* QR = 1 "response", RCODE = 4 "Not Implemented" */ + outr_flags = htons(0x8000 | 4); + err_msg = NULL; + /* OPCODE != 0 "standard query"? */ if ((head->flags & htons(0x7800)) != 0) { err_msg = "opcode != 0"; @@ -559,7 +566,7 @@ int dnsd_main(int argc UNUSED_PARAM, char **argv) if (OPT_verbose) bb_simple_info_msg("got UDP packet"); buf[r] = '\0'; /* paranoia */ - r = process_packet(conf_data, conf_ttl, buf); + r = process_packet(conf_data, conf_ttl, buf, r); if (r <= 0) continue; send_to_from(udps, buf, r, 0, &from->u.sa, &to->u.sa, lsa->len); diff --git a/networking/ntpd.c b/networking/ntpd.c index 1f17b08ef..0f350fa6f 100644 --- a/networking/ntpd.c +++ b/networking/ntpd.c @@ -127,24 +127,15 @@ */ #define MAX_VERBOSE 3 - /* High-level description of the algorithm: * * We start running with very small poll_exp, BURSTPOLL, * in order to quickly accumulate INITIAL_SAMPLES datapoints * for each peer. Then, time is stepped if the offset is larger - * than STEP_THRESHOLD, otherwise it isn't; anyway, we enlarge - * poll_exp to MINPOLL and enter frequency measurement step: - * we collect new datapoints but ignore them for WATCH_THRESHOLD - * seconds. After WATCH_THRESHOLD seconds we look at accumulated - * offset and estimate frequency drift. - * - * (frequency measurement step seems to not be strictly needed, - * it is conditionally disabled with USING_INITIAL_FREQ_ESTIMATION - * define set to 0) + * than STEP_THRESHOLD, otherwise it isn't stepped. * - * After this, we enter "steady state": we collect a datapoint, - * we select the best peer, if this datapoint is not a new one + * Then poll_exp is set to MINPOLL, and we enter "steady state": we collect + * a datapoint, we select the best peer, if this datapoint is not a new one * (IOW: if this datapoint isn't for selected peer), sleep * and collect another one; otherwise, use its offset to update * frequency drift, if offset is somewhat large, reduce poll_exp, @@ -169,7 +160,7 @@ * datapoints after the step. */ -#define INITIAL_SAMPLES 4 /* how many samples do we want for init */ +#define INITIAL_SAMPLES 3 /* how many samples do we want for init */ #define MIN_FREQHOLD 10 /* adjust offset, but not freq in this many first adjustments */ #define BAD_DELAY_GROWTH 4 /* drop packet if its delay grew by more than this factor */ @@ -189,13 +180,10 @@ // ^^^^ used to be 0.125. // Since Linux 2.6.26 (circa 2006), kernel accepts (-0.5s, +0.5s) range -/* Stepout threshold (sec). 
std ntpd uses 900 (11 mins (!)) */ -//UNUSED: #define WATCH_THRESHOLD 128 -/* NB: set WATCH_THRESHOLD to ~60 when debugging to save time) */ -//UNUSED: #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */ -/* - * If we got |offset| > BIGOFF from a peer, cap next query interval +// #define PANIC_THRESHOLD 1000 /* panic threshold (sec) */ + +/* If we got |offset| > BIGOFF from a peer, cap next query interval * for this peer by this many seconds: */ #define BIGOFF STEP_THRESHOLD @@ -204,18 +192,16 @@ #define FREQ_TOLERANCE 0.000015 /* frequency tolerance (15 PPM) */ #define BURSTPOLL 0 /* initial poll */ #define MINPOLL 5 /* minimum poll interval. std ntpd uses 6 (6: 64 sec) */ -/* - * If offset > discipline_jitter * POLLADJ_GATE, and poll interval is > 2^BIGPOLL, +/* If offset > discipline_jitter * POLLADJ_GATE, and poll interval is > 2^BIGPOLL, * then it is decreased _at once_. (If <= 2^BIGPOLL, it will be decreased _eventually_). */ #define BIGPOLL 9 /* 2^9 sec ~= 8.5 min */ #define MAXPOLL 12 /* maximum poll interval (12: 1.1h, 17: 36.4h). std ntpd uses 17 */ -/* - * Actively lower poll when we see such big offsets. +/* Actively lower poll when we see such big offsets. * With SLEW_THRESHOLD = 0.125, it means we try to sync more aggressively * if offset increases over ~0.04 sec */ -//#define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3) +// #define POLLDOWN_OFFSET (SLEW_THRESHOLD / 3) #define MINDISP 0.01 /* minimum dispersion (sec) */ #define MAXDISP 16 /* maximum dispersion (sec) */ #define MAXSTRAT 16 /* maximum stratum (infinity metric) */ @@ -223,7 +209,16 @@ #define MIN_SELECTED 1 /* minimum intersection survivors */ #define MIN_CLUSTERED 3 /* minimum cluster survivors */ -#define MAXDRIFT 0.000500 /* frequency drift we can correct (500 PPM) */ +/* Correct frequency ourself (0) or let kernel do it (1)? */ +#define USING_KERNEL_PLL_LOOP 1 +// /* frequency drift we can correct (500 PPM) */ +// #define MAXDRIFT 0.000500 +// /* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */ +// #define ALLAN 512 +// /* PLL loop gain */ +// #define PLL 65536 +// /* FLL loop gain [why it depends on MAXPOLL??] */ +// #define FLL (MAXPOLL + 1) /* Poll-adjust threshold. * When we see that offset is small enough compared to discipline jitter, @@ -239,12 +234,6 @@ */ #define POLLADJ_GATE 4 #define TIMECONST_HACK_GATE 2 -/* Compromise Allan intercept (sec). doc uses 1500, std ntpd uses 512 */ -#define ALLAN 512 -/* PLL loop gain */ -#define PLL 65536 -/* FLL loop gain [why it depends on MAXPOLL??] */ -#define FLL (MAXPOLL + 1) /* Parameter averaging constant */ #define AVG 4 @@ -372,10 +361,6 @@ typedef struct { char p_hostname[1]; } peer_t; - -#define USING_KERNEL_PLL_LOOP 1 -#define USING_INITIAL_FREQ_ESTIMATION 0 - enum { OPT_n = (1 << 0), OPT_q = (1 << 1), @@ -454,7 +439,7 @@ struct globals { */ #define G_precision_exp -9 /* - * G_precision_exp is used only for construction outgoing packets. + * G_precision_exp is used only for constructing outgoing packets. * It's ok to set G_precision_sec to a slightly different value * (One which is "nicer looking" in logs). 
* Exact value would be (1.0 / (1 << (- G_precision_exp))): @@ -462,12 +447,7 @@ struct globals { #define G_precision_sec 0.002 uint8_t stratum; -#define STATE_NSET 0 /* initial state, "nothing is set" */ -//#define STATE_FSET 1 /* frequency set from file */ -//#define STATE_SPIK 2 /* spike detected */ -//#define STATE_FREQ 3 /* initial frequency */ -#define STATE_SYNC 4 /* clock synchronized (normal operation) */ - uint8_t discipline_state; // doc calls it c.state + //uint8_t discipline_state; // doc calls it c.state uint8_t poll_exp; // s.poll int polladj_count; // c.count int FREQHOLD_cnt; @@ -490,7 +470,6 @@ struct globals { }; #define G (*ptr_to_globals) - #define VERB1 if (MAX_VERBOSE && G.verbose) #define VERB2 if (MAX_VERBOSE >= 2 && G.verbose >= 2) #define VERB3 if (MAX_VERBOSE >= 3 && G.verbose >= 3) @@ -657,104 +636,11 @@ filter_datapoints(peer_t *p) double sum, wavg; datapoint_t *fdp; -#if 0 /* Simulations have shown that use of *averaged* offset for p->filter_offset * is in fact worse than simply using last received one: with large poll intervals * (>= 2048) averaging code uses offset values which are outdated by hours, * and time/frequency correction goes totally wrong when fed essentially bogus offsets. */ - int got_newest; - double minoff, maxoff, w; - double x = x; /* for compiler */ - double oldest_off = oldest_off; - double oldest_age = oldest_age; - double newest_off = newest_off; - double newest_age = newest_age; - - fdp = p->filter_datapoint; - - minoff = maxoff = fdp[0].d_offset; - for (i = 1; i < NUM_DATAPOINTS; i++) { - if (minoff > fdp[i].d_offset) - minoff = fdp[i].d_offset; - if (maxoff < fdp[i].d_offset) - maxoff = fdp[i].d_offset; - } - - idx = p->datapoint_idx; /* most recent datapoint's index */ - /* Average offset: - * Drop two outliers and take weighted average of the rest: - * most_recent/2 + older1/4 + older2/8 ... + older5/32 + older6/32 - * we use older6/32, not older6/64 since sum of weights should be 1: - * 1/2 + 1/4 + 1/8 + 1/16 + 1/32 + 1/32 = 1 - */ - wavg = 0; - w = 0.5; - /* n-1 - * --- dispersion(i) - * filter_dispersion = \ ------------- - * / (i+1) - * --- 2 - * i=0 - */ - got_newest = 0; - sum = 0; - for (i = 0; i < NUM_DATAPOINTS; i++) { - VERB5 { - bb_error_msg("datapoint[%d]: off:%f disp:%f(%f) age:%f%s", - i, - fdp[idx].d_offset, - fdp[idx].d_dispersion, dispersion(&fdp[idx]), - G.cur_time - fdp[idx].d_recv_time, - (minoff == fdp[idx].d_offset || maxoff == fdp[idx].d_offset) - ? " (outlier by offset)" : "" - ); - } - - sum += dispersion(&fdp[idx]) / (2 << i); - - if (minoff == fdp[idx].d_offset) { - minoff -= 1; /* so that we don't match it ever again */ - } else - if (maxoff == fdp[idx].d_offset) { - maxoff += 1; - } else { - oldest_off = fdp[idx].d_offset; - oldest_age = G.cur_time - fdp[idx].d_recv_time; - if (!got_newest) { - got_newest = 1; - newest_off = oldest_off; - newest_age = oldest_age; - } - x = oldest_off * w; - wavg += x; - w /= 2; - } - - idx = (idx - 1) & (NUM_DATAPOINTS - 1); - } - p->filter_dispersion = sum; - wavg += x; /* add another older6/64 to form older6/32 */ - /* Fix systematic underestimation with large poll intervals. - * Imagine that we still have a bit of uncorrected drift, - * and poll interval is big (say, 100 sec). Offsets form a progression: - * 0.0 0.1 0.2 0.3 0.4 0.5 0.6 0.7 - 0.7 is most recent. 
- * The algorithm above drops 0.0 and 0.7 as outliers, - * and then we have this estimation, ~25% off from 0.7: - * 0.1/32 + 0.2/32 + 0.3/16 + 0.4/8 + 0.5/4 + 0.6/2 = 0.503125 - */ - x = oldest_age - newest_age; - if (x != 0) { - x = newest_age / x; /* in above example, 100 / (600 - 100) */ - if (x < 1) { /* paranoia check */ - x = (newest_off - oldest_off) * x; /* 0.5 * 100/500 = 0.1 */ - wavg += x; - } - } - p->filter_offset = wavg; - -#else - fdp = p->filter_datapoint; idx = p->datapoint_idx; /* most recent datapoint's index */ @@ -777,7 +663,6 @@ filter_datapoints(peer_t *p) } wavg /= NUM_DATAPOINTS; p->filter_dispersion = sum; -#endif /* +----- -----+ ^ 1/2 * | n-1 | @@ -1089,7 +974,6 @@ send_query_to_peer(peer_t *p) set_next(p, RESPONSE_INTERVAL); } - /* Note that there is no provision to prevent several run_scripts * to be started in quick succession. In fact, it happens rather often * if initial syncronization results in a step. @@ -1548,15 +1432,14 @@ select_and_cluster(void) * Local clock discipline and its helpers */ static void -set_new_values(int disc_state, double offset, double recv_time) +set_new_values(double offset, double recv_time) { /* Enter new state and set state variables. Note we use the time * of the last clock filter sample, which must be earlier than * the current time. */ - VERB4 bb_error_msg("disc_state=%d last update offset=%f recv_time=%f", - disc_state, offset, recv_time); - G.discipline_state = disc_state; + VERB4 bb_error_msg("last update offset=%f recv_time=%f", + offset, recv_time); G.last_update_offset = offset; G.last_update_recv_time = recv_time; } @@ -1572,8 +1455,6 @@ update_local_clock(peer_t *p) double abs_offset; #if !USING_KERNEL_PLL_LOOP double freq_drift; -#endif -#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION double since_last_update; #endif double etemp, dtemp; @@ -1603,63 +1484,15 @@ update_local_clock(peer_t *p) * action is and defines how the system reacts to large time * and frequency errors. */ -#if !USING_KERNEL_PLL_LOOP || USING_INITIAL_FREQ_ESTIMATION - since_last_update = recv_time - G.reftime; -#endif #if !USING_KERNEL_PLL_LOOP + since_last_update = recv_time - G.reftime; freq_drift = 0; #endif -#if USING_INITIAL_FREQ_ESTIMATION - if (G.discipline_state == STATE_FREQ) { - /* Ignore updates until the stepout threshold */ - if (since_last_update < WATCH_THRESHOLD) { - VERB4 bb_error_msg("measuring drift, datapoint ignored, %f sec remains", - WATCH_THRESHOLD - since_last_update); - return 0; /* "leave poll interval as is" */ - } -# if !USING_KERNEL_PLL_LOOP - freq_drift = (offset - G.last_update_offset) / since_last_update; -# endif - } -#endif /* There are two main regimes: when the * offset exceeds the step threshold and when it does not. */ if (abs_offset > STEP_THRESHOLD) { -#if 0 - double remains; - -// This "spike state" seems to be useless, peer selection already drops -// occassional "bad" datapoints. If we are here, there were _many_ -// large offsets. When a few first large offsets are seen, -// we end up in "no valid datapoints, no peer selected" state. -// Only when enough of them are seen (which means it's not a fluke), -// we end up here. Looks like _our_ clock is off. 
- switch (G.discipline_state) { - case STATE_SYNC: - /* The first outlyer: ignore it, switch to SPIK state */ - VERB3 bb_error_msg("update from %s: offset:%+f, spike%s", - p->p_dotted, offset, - ""); - G.discipline_state = STATE_SPIK; - return -1; /* "decrease poll interval" */ - - case STATE_SPIK: - /* Ignore succeeding outlyers until either an inlyer - * is found or the stepout threshold is exceeded. - */ - remains = WATCH_THRESHOLD - since_last_update; - if (remains > 0) { - VERB3 bb_error_msg("update from %s: offset:%+f, spike%s", - p->p_dotted, offset, - ", datapoint ignored"); - return -1; /* "decrease poll interval" */ - } - /* fall through: we need to step */ - } /* switch */ -#endif - /* Step the time and clamp down the poll interval. * * In NSET state an initial frequency correction is @@ -1694,16 +1527,17 @@ update_local_clock(peer_t *p) recv_time += offset; -#if USING_INITIAL_FREQ_ESTIMATION - if (G.discipline_state == STATE_NSET) { - set_new_values(STATE_FREQ, /*offset:*/ 0, recv_time); - return 1; /* "ok to increase poll interval" */ - } -#endif abs_offset = offset = 0; - set_new_values(STATE_SYNC, offset, recv_time); + set_new_values(offset, recv_time); } else { /* abs_offset <= STEP_THRESHOLD */ + if (option_mask32 & OPT_q) { + /* We were only asked to set time once. + * The clock is precise enough, no need to step. + */ + exit(0); + } + /* The ratio is calculated before jitter is updated to make * poll adjust code more sensitive to large offsets. */ @@ -1718,75 +1552,31 @@ update_local_clock(peer_t *p) if (G.discipline_jitter < G_precision_sec) G.discipline_jitter = G_precision_sec; - switch (G.discipline_state) { - case STATE_NSET: - if (option_mask32 & OPT_q) { - /* We were only asked to set time once. - * The clock is precise enough, no need to step. - */ - exit(0); - } -#if USING_INITIAL_FREQ_ESTIMATION - /* This is the first update received and the frequency - * has not been initialized. The first thing to do - * is directly measure the oscillator frequency. - */ - set_new_values(STATE_FREQ, offset, recv_time); -#else - set_new_values(STATE_SYNC, offset, recv_time); -#endif - VERB4 bb_simple_error_msg("transitioning to FREQ, datapoint ignored"); - return 0; /* "leave poll interval as is" */ - -#if 0 /* this is dead code for now */ - case STATE_FSET: - /* This is the first update and the frequency - * has been initialized. Adjust the phase, but - * don't adjust the frequency until the next update. - */ - set_new_values(STATE_SYNC, offset, recv_time); - /* freq_drift remains 0 */ - break; -#endif - -#if USING_INITIAL_FREQ_ESTIMATION - case STATE_FREQ: - /* since_last_update >= WATCH_THRESHOLD, we waited enough. - * Correct the phase and frequency and switch to SYNC state. - * freq_drift was already estimated (see code above) - */ - set_new_values(STATE_SYNC, offset, recv_time); - break; -#endif - - default: #if !USING_KERNEL_PLL_LOOP - /* Compute freq_drift due to PLL and FLL contributions. - * - * The FLL and PLL frequency gain constants - * depend on the poll interval and Allan - * intercept. The FLL is not used below one-half - * the Allan intercept. Above that the loop gain - * increases in steps to 1 / AVG. - */ - if ((1 << G.poll_exp) > ALLAN / 2) { - etemp = FLL - G.poll_exp; - if (etemp < AVG) - etemp = AVG; - freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp); - } - /* For the PLL the integration interval - * (numerator) is the minimum of the update - * interval and poll interval. 
This allows - * oversampling, but not undersampling. - */ - etemp = MIND(since_last_update, (1 << G.poll_exp)); - dtemp = (4 * PLL) << G.poll_exp; - freq_drift += offset * etemp / SQUARE(dtemp); -#endif - set_new_values(STATE_SYNC, offset, recv_time); - break; + /* Compute freq_drift due to PLL and FLL contributions. + * + * The FLL and PLL frequency gain constants + * depend on the poll interval and Allan + * intercept. The FLL is not used below one-half + * the Allan intercept. Above that the loop gain + * increases in steps to 1 / AVG. + */ + if ((1 << G.poll_exp) > ALLAN / 2) { + etemp = FLL - G.poll_exp; + if (etemp < AVG) + etemp = AVG; + freq_drift += (offset - G.last_update_offset) / (MAXD(since_last_update, ALLAN) * etemp); } + /* For the PLL the integration interval + * (numerator) is the minimum of the update + * interval and poll interval. This allows + * oversampling, but not undersampling. + */ + etemp = MIND(since_last_update, (1 << G.poll_exp)); + dtemp = (4 * PLL) << G.poll_exp; + freq_drift += offset * etemp / SQUARE(dtemp); +#endif + set_new_values(offset, recv_time); if (G.stratum != p->lastpkt_stratum + 1) { G.stratum = p->lastpkt_stratum + 1; run_script("stratum", offset); @@ -1805,9 +1595,7 @@ update_local_clock(peer_t *p) G.rootdisp = p->lastpkt_rootdisp + dtemp; VERB4 bb_error_msg("updating leap/refid/reftime/rootdisp from peer %s", p->p_dotted); - /* We are in STATE_SYNC now, but did not do adjtimex yet. - * (Any other state does not reach this, they all return earlier) - * By this time, freq_drift and offset are set + /* By this time, freq_drift and offset are set * to values suitable for adjtimex. */ #if !USING_KERNEL_PLL_LOOP @@ -1963,7 +1751,6 @@ update_local_clock(peer_t *p) return 1; /* "ok to increase poll interval" */ } - /* * We've got a new reply packet from a peer, process it * (helpers first) @@ -2349,6 +2136,12 @@ recv_and_process_client_pkt(void /*int fd*/) do_sendto(G_listen_fd, /*from:*/ &to->u.sa, /*to:*/ from, /*addrlen:*/ to->len, &msg, size); + VERB3 { + char *addr; + addr = xmalloc_sockaddr2dotted_noport(from); + bb_error_msg("responded to query from %s", addr); + free(addr); + } bail: free(to); @@ -2767,7 +2560,7 @@ int ntpd_main(int argc UNUSED_PARAM, char **argv) timeout++; /* (nextaction - G.cur_time) rounds down, compensating */ /* Here we may block */ - VERB2 { + VERB3 { if (i > (ENABLE_FEATURE_NTPD_SERVER && G_listen_fd != -1)) { /* We wait for at least one reply. * Poll for it, without wasting time for message. 
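For reference, the frequency-drift computation that the ntpd.c hunks above keep under !USING_KERNEL_PLL_LOOP can be read in isolation. The sketch below mirrors the PLL/FLL arithmetic and the constants quoted in the diff (ALLAN, PLL, FLL, AVG, MAXPOLL); the function name freq_drift_update, the local macro definitions, and the sample inputs are illustrative only and are not part of the patch.

#include <stdio.h>

#define MAXPOLL 12             /* maximum poll interval exponent */
#define ALLAN   512            /* compromise Allan intercept (sec) */
#define PLL     65536          /* PLL loop gain */
#define FLL     (MAXPOLL + 1)  /* FLL loop gain */
#define AVG     4              /* parameter averaging constant */
#define SQUARE(x)  ((x) * (x))
#define MAXD(a, b) ((a) > (b) ? (a) : (b))
#define MIND(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical helper: the same arithmetic as the !USING_KERNEL_PLL_LOOP
 * branch of update_local_clock(), with the globals passed as parameters. */
static double
freq_drift_update(double offset, double last_update_offset,
		double since_last_update, int poll_exp)
{
	double freq_drift = 0;
	double etemp, dtemp;

	/* FLL contribution: not used below one-half the Allan intercept */
	if ((1 << poll_exp) > ALLAN / 2) {
		etemp = FLL - poll_exp;
		if (etemp < AVG)
			etemp = AVG;
		freq_drift += (offset - last_update_offset)
				/ (MAXD(since_last_update, ALLAN) * etemp);
	}
	/* PLL contribution: the integration interval is capped by the poll
	 * interval, which allows oversampling but not undersampling */
	etemp = MIND(since_last_update, (double)(1 << poll_exp));
	dtemp = (4 * PLL) << poll_exp;
	freq_drift += offset * etemp / SQUARE(dtemp);

	return freq_drift;
}

int main(void)
{
	/* e.g. a +30 ms offset seen 512 seconds after the previous update,
	 * with the poll interval already at 2^9 = 512 seconds */
	printf("freq_drift = %g\n", freq_drift_update(0.030, 0.010, 512.0, 9));
	return 0;
}

With USING_KERNEL_PLL_LOOP defined as 1 (the setting this commit keeps), this code path is compiled out and the kernel PLL performs the frequency correction instead.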
diff --git a/networking/udhcp/d6_dhcpc.c b/networking/udhcp/d6_dhcpc.c index fbdaa99bd..76b087b92 100644 --- a/networking/udhcp/d6_dhcpc.c +++ b/networking/udhcp/d6_dhcpc.c @@ -1589,8 +1589,10 @@ int udhcpc6_main(int argc UNUSED_PARAM, char **argv) } if ((packet.d6_xid32 & htonl(0x00ffffff)) != xid) { - log1("xid %x (our is %x), ignoring packet", - (unsigned)(packet.d6_xid32 & htonl(0x00ffffff)), (unsigned)xid); + log1("xid %x (our is %x)%s", + (unsigned)(packet.d6_xid32 & htonl(0x00ffffff)), (unsigned)xid, + ", ignoring packet" + ); continue; } @@ -1743,7 +1745,7 @@ int udhcpc6_main(int argc UNUSED_PARAM, char **argv) free(client6_data.ia_na); client6_data.ia_na = d6_copy_option(packet.d6_options, packet_end, D6_OPT_IA_NA); if (!client6_data.ia_na) { - bb_info_msg("no %s option, ignoring packet", "IA_NA"); + bb_info_msg("no %s option%s", "IA_NA", ", ignoring packet"); continue; } if (client6_data.ia_na->len < (4 + 4 + 4) + (2 + 2 + 16 + 4 + 4)) { @@ -1756,7 +1758,7 @@ int udhcpc6_main(int argc UNUSED_PARAM, char **argv) D6_OPT_IAADDR ); if (!iaaddr) { - bb_info_msg("no %s option, ignoring packet", "IAADDR"); + bb_info_msg("no %s option%s", "IAADDR", ", ignoring packet"); continue; } if (iaaddr->len < (16 + 4 + 4)) { @@ -1781,7 +1783,7 @@ int udhcpc6_main(int argc UNUSED_PARAM, char **argv) free(client6_data.ia_pd); client6_data.ia_pd = d6_copy_option(packet.d6_options, packet_end, D6_OPT_IA_PD); if (!client6_data.ia_pd) { - bb_info_msg("no %s option, ignoring packet", "IA_PD"); + bb_info_msg("no %s option%s", "IA_PD", ", ignoring packet"); continue; } if (client6_data.ia_pd->len < (4 + 4 + 4) + (2 + 2 + 4 + 4 + 1 + 16)) { @@ -1794,7 +1796,7 @@ int udhcpc6_main(int argc UNUSED_PARAM, char **argv) D6_OPT_IAPREFIX ); if (!iaprefix) { - bb_info_msg("no %s option, ignoring packet", "IAPREFIX"); + bb_info_msg("no %s option%s", "IAPREFIX", ", ignoring packet"); continue; } if (iaprefix->len < (4 + 4 + 1 + 16)) { diff --git a/networking/udhcp/dhcpc.c b/networking/udhcp/dhcpc.c index 922c71ebd..bbcbd1fca 100644 --- a/networking/udhcp/dhcpc.c +++ b/networking/udhcp/dhcpc.c @@ -729,7 +729,7 @@ static NOINLINE int send_discover(uint32_t xid, uint32_t requested) */ add_client_options(&packet); - bb_info_msg("sending %s", "discover"); + bb_simple_info_msg("broadcasting discover"); return raw_bcast_from_client_data_ifindex(&packet, INADDR_ANY); } @@ -742,6 +742,7 @@ static NOINLINE int send_select(uint32_t xid, uint32_t server, uint32_t requeste { struct dhcp_packet packet; struct in_addr temp_addr; + char server_str[sizeof("255.255.255.255")]; /* * RFC 2131 4.3.2 DHCPREQUEST message @@ -772,8 +773,13 @@ static NOINLINE int send_select(uint32_t xid, uint32_t server, uint32_t requeste */ add_client_options(&packet); + temp_addr.s_addr = server; + strcpy(server_str, inet_ntoa(temp_addr)); temp_addr.s_addr = requested; - bb_info_msg("sending select for %s", inet_ntoa(temp_addr)); + bb_info_msg("broadcasting select for %s, server %s", + inet_ntoa(temp_addr), + server_str + ); return raw_bcast_from_client_data_ifindex(&packet, INADDR_ANY); } @@ -782,7 +788,6 @@ static NOINLINE int send_select(uint32_t xid, uint32_t server, uint32_t requeste static NOINLINE int send_renew(uint32_t xid, uint32_t server, uint32_t ciaddr) { struct dhcp_packet packet; - struct in_addr temp_addr; /* * RFC 2131 4.3.2 DHCPREQUEST message @@ -813,8 +818,14 @@ static NOINLINE int send_renew(uint32_t xid, uint32_t server, uint32_t ciaddr) */ add_client_options(&packet); - temp_addr.s_addr = server; - bb_info_msg("sending renew to %s", 
inet_ntoa(temp_addr)); + if (server) { + struct in_addr temp_addr; + temp_addr.s_addr = server; + bb_info_msg("sending renew to server %s", inet_ntoa(temp_addr)); + } else { + bb_simple_info_msg("broadcasting renew"); + } + return bcast_or_ucast(&packet, ciaddr, server); } @@ -843,7 +854,7 @@ static NOINLINE int send_decline(/*uint32_t xid,*/ uint32_t server, uint32_t req udhcp_add_simple_option(&packet, DHCP_SERVER_ID, server); - bb_info_msg("sending %s", "decline"); + bb_simple_info_msg("broadcasting decline"); return raw_bcast_from_client_data_ifindex(&packet, INADDR_ANY); } #endif @@ -1644,8 +1655,10 @@ int udhcpc_main(int argc UNUSED_PARAM, char **argv) } if (packet.xid != xid) { - log1("xid %x (our is %x), ignoring packet", - (unsigned)packet.xid, (unsigned)xid); + log1("xid %x (our is %x)%s", + (unsigned)packet.xid, (unsigned)xid, + ", ignoring packet" + ); continue; } @@ -1720,6 +1733,7 @@ int udhcpc_main(int argc UNUSED_PARAM, char **argv) unsigned start; uint32_t lease_seconds; struct in_addr temp_addr; + char server_str[sizeof("255.255.255.255")]; uint8_t *temp; temp = udhcp_get_option32(&packet, DHCP_LEASE_TIME); @@ -1775,9 +1789,11 @@ int udhcpc_main(int argc UNUSED_PARAM, char **argv) } #endif /* enter bound state */ + temp_addr.s_addr = server_addr; + strcpy(server_str, inet_ntoa(temp_addr)); temp_addr.s_addr = packet.yiaddr; - bb_info_msg("lease of %s obtained, lease time %u", - inet_ntoa(temp_addr), (unsigned)lease_seconds); + bb_info_msg("lease of %s obtained from %s, lease time %u", + inet_ntoa(temp_addr), server_str, (unsigned)lease_seconds); requested_ip = packet.yiaddr; start = monotonic_sec(); diff --git a/networking/udhcp/dhcpd.c b/networking/udhcp/dhcpd.c index cd32cb437..260130507 100644 --- a/networking/udhcp/dhcpd.c +++ b/networking/udhcp/dhcpd.c @@ -1048,7 +1048,7 @@ int udhcpd_main(int argc UNUSED_PARAM, char **argv) move_from_unaligned32(server_id_network_order, server_id_opt); if (server_id_network_order != server_data.server_nip) { /* client talks to somebody else */ - log1("server ID doesn't match%s", ", ignoring"); + log1("server ID doesn't match%s", ", ignoring packet"); continue; } } @@ -1171,7 +1171,7 @@ o DHCPREQUEST generated during REBINDING state: if (!requested_ip_opt) { requested_nip = packet.ciaddr; if (requested_nip == 0) { - log1("no requested IP and no ciaddr%s", ", ignoring"); + log1("no requested IP and no ciaddr%s", ", ignoring packet"); break; } } |
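A side note on the dhcpc.c changes above: send_select() and the bound-state code copy the first formatted address into server_str because inet_ntoa() returns a pointer to a single static buffer, so a second call in the same statement would overwrite the first result. A minimal standalone illustration follows; the addresses and the program itself are made up, only the copy-before-the-second-call pattern comes from the patch.

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	struct in_addr server_addr = { .s_addr = htonl(0xC0A80001) };  /* 192.168.0.1 */
	struct in_addr yiaddr      = { .s_addr = htonl(0xC0A80064) };  /* 192.168.0.100 */
	char server_str[sizeof("255.255.255.255")];

	/* Copy the first result out of inet_ntoa()'s static buffer... */
	strcpy(server_str, inet_ntoa(server_addr));
	/* ...so the second inet_ntoa() call can safely reuse that buffer. */
	printf("lease of %s obtained from %s\n", inet_ntoa(yiaddr), server_str);
	return 0;
}

A buffer of sizeof("255.255.255.255") bytes, as used in the patch, is large enough for the longest dotted-quad string plus the terminating NUL.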