tcp: bool conversions
bool conversions where possible.

__inline__ -> inline

space cleanups

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by: David S. Miller
Parent: e005d193d5
Commit: a2a385d627
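Every hunk below applies the same mechanical pattern: predicates that returned 0/1 through an int now return bool with false/true literals, the nonstandard __inline__ spelling becomes plain inline, and a few spacing issues are cleaned up in passing. A minimal standalone sketch of the before/after (the predicate here is hypothetical, not taken from the patch; userspace gets bool from <stdbool.h> where the kernel uses <linux/types.h>):

    #include <stdbool.h>

    /* Before: an int carrying a truth value, and the old __inline__ spelling. */
    static __inline__ int peer_is_stale_old(unsigned long last_seen, unsigned long now)
    {
            if (now - last_seen > 60)
                    return 1;
            return 0;
    }

    /* After: bool return type, true/false literals, plain inline. */
    static inline bool peer_is_stale(unsigned long last_seen, unsigned long now)
    {
            return now - last_seen > 60;
    }

For predicates like these the change is behavior-preserving, since the stored values were only ever 0 or 1; the payoff is that the return-value contract becomes visible in the signature.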
net/ipv4/tcp.c (+10 -10)

@@ -593,7 +593,7 @@ static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
 	tp->pushed_seq = tp->write_seq;
 }
 
-static inline int forced_push(const struct tcp_sock *tp)
+static inline bool forced_push(const struct tcp_sock *tp)
 {
 	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
 }
@@ -1082,7 +1082,7 @@ new_segment:
 			if (err)
 				goto do_fault;
 		} else {
-			int merge = 0;
+			bool merge = false;
 			int i = skb_shinfo(skb)->nr_frags;
 			struct page *page = sk->sk_sndmsg_page;
 			int off;
@@ -1096,7 +1096,7 @@ new_segment:
 			    off != PAGE_SIZE) {
 				/* We can extend the last page
 				 * fragment. */
-				merge = 1;
+				merge = true;
 			} else if (i == MAX_SKB_FRAGS || !sg) {
 				/* Need to add new fragment and cannot
 				 * do this because interface is non-SG,
@@ -1293,7 +1293,7 @@ static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
 void tcp_cleanup_rbuf(struct sock *sk, int copied)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	int time_to_ack = 0;
+	bool time_to_ack = false;
 
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
@@ -1319,7 +1319,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
 		      !icsk->icsk_ack.pingpong)) &&
 		      !atomic_read(&sk->sk_rmem_alloc)))
-			time_to_ack = 1;
+			time_to_ack = true;
 	}
 
 	/* We send an ACK if we can now advertise a non-zero window
@@ -1341,7 +1341,7 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 			 * "Lots" means "at least twice" here.
 			 */
 			if (new_window && new_window >= 2 * rcv_window_now)
-				time_to_ack = 1;
+				time_to_ack = true;
 		}
 	}
 	if (time_to_ack)
@@ -2171,7 +2171,7 @@ EXPORT_SYMBOL(tcp_close);
 
 /* These states need RST on ABORT according to RFC793 */
 
-static inline int tcp_need_reset(int state)
+static inline bool tcp_need_reset(int state)
 {
 	return (1 << state) &
 	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
@@ -2245,7 +2245,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 }
 EXPORT_SYMBOL(tcp_disconnect);
 
-static inline int tcp_can_repair_sock(struct sock *sk)
+static inline bool tcp_can_repair_sock(const struct sock *sk)
 {
 	return capable(CAP_NET_ADMIN) &&
 		((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
@@ -3172,13 +3172,13 @@ out_free:
 struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk)
 {
 	struct tcp_md5sig_pool __percpu *pool;
-	int alloc = 0;
+	bool alloc = false;
 
 retry:
 	spin_lock_bh(&tcp_md5sig_pool_lock);
 	pool = tcp_md5sig_pool;
 	if (tcp_md5sig_users++ == 0) {
-		alloc = 1;
+		alloc = true;
 		spin_unlock_bh(&tcp_md5sig_pool_lock);
 	} else if (!pool) {
 		tcp_md5sig_users--;
net/ipv4/tcp_cong.c (+3 -3)

@@ -280,19 +280,19 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
 /* RFC2861 Check whether we are limited by application or congestion window
  * This is the inverse of cwnd check in tcp_tso_should_defer
  */
-int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	u32 left;
 
 	if (in_flight >= tp->snd_cwnd)
-		return 1;
+		return true;
 
 	left = tp->snd_cwnd - in_flight;
 	if (sk_can_gso(sk) &&
 	    left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
 	    left * tp->mss_cache < sk->sk_gso_max_size)
-		return 1;
+		return true;
 	return left <= tcp_max_tso_deferred_mss(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
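tcp_is_cwnd_limited() is the sender-side RFC 2861 test: the connection counts as cwnd-limited when the whole window is in flight, or when the leftover window is too small to be worth building a TSO burst from. A standalone sketch with the socket fields lifted into parameters (the helper name and flat parameter list are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch of the converted tcp_is_cwnd_limited() decision logic. */
    static bool is_cwnd_limited(uint32_t in_flight, uint32_t snd_cwnd,
                                bool can_gso, uint32_t tso_win_divisor,
                                uint32_t mss_cache, uint32_t gso_max_size,
                                uint32_t max_tso_deferred_mss)
    {
            uint32_t left;

            if (in_flight >= snd_cwnd)
                    return true;    /* the whole window is in flight */

            left = snd_cwnd - in_flight;
            if (can_gso &&
                left * tso_win_divisor < snd_cwnd &&
                left * mss_cache < gso_max_size)
                    return true;    /* leftover room too small for a TSO burst */
            return left <= max_tso_deferred_mss;
    }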
net/ipv4/tcp_hybla.c

@@ -15,7 +15,7 @@
 
 /* Tcp Hybla structure. */
 struct hybla {
-	u8    hybla_en;
+	bool  hybla_en;
 	u32   snd_cwnd_cents; /* Keeps increment values when it is <1, <<7 */
 	u32   rho;	      /* Rho parameter, integer part */
 	u32   rho2;	      /* Rho * Rho, integer part */
@@ -24,8 +24,7 @@ struct hybla {
 	u32   minrtt;	      /* Minimum smoothed round trip time value seen */
 };
 
-/* Hybla reference round trip time (default= 1/40 sec = 25 ms),
-   expressed in jiffies */
+/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
 static int rtt0 = 25;
 module_param(rtt0, int, 0644);
 MODULE_PARM_DESC(rtt0, "reference rout trip time (ms)");
@@ -39,7 +38,7 @@ static inline void hybla_recalc_param (struct sock *sk)
 	ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
 	ca->rho = ca->rho_3ls >> 3;
 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
-	ca->rho2 = ca->rho2_7ls >>7;
+	ca->rho2 = ca->rho2_7ls >> 7;
 }
 
 static void hybla_init(struct sock *sk)
@@ -52,7 +51,7 @@ static void hybla_init(struct sock *sk)
 	ca->rho_3ls = 0;
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
-	ca->hybla_en = 1;
+	ca->hybla_en = true;
 	tp->snd_cwnd = 2;
 	tp->snd_cwnd_clamp = 65535;
 
@@ -67,6 +66,7 @@ static void hybla_init(struct sock *sk)
 static void hybla_state(struct sock *sk, u8 ca_state)
 {
 	struct hybla *ca = inet_csk_ca(sk);
+
 	ca->hybla_en = (ca_state == TCP_CA_Open);
 }
 
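hybla_recalc_param() works in fixed point: rho_3ls carries rho with three fractional bits, and rho2_7ls carries rho squared with seven, which is why squaring the 3-bit value needs one extra left shift. A worked sketch with made-up inputs, assuming the smoothed RTT arrives pre-scaled by 8 as the kernel stores it (none of these numbers come from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t srtt_jiffies = 40;     /* hypothetical smoothed RTT, jiffies */
            uint32_t rtt0_jiffies = 10;     /* hypothetical reference RTT, jiffies */

            uint32_t rho_3ls = srtt_jiffies * 8 / rtt0_jiffies;     /* rho << 3 */
            if (rho_3ls < 8)                /* clamp rho to >= 1, as max_t(..., 8) does */
                    rho_3ls = 8;
            uint32_t rho = rho_3ls >> 3;
            uint32_t rho2_7ls = (rho_3ls * rho_3ls) << 1;   /* (rho<<3)^2 = rho^2<<6; <<1 gives <<7 */
            uint32_t rho2 = rho2_7ls >> 7;

            printf("rho=%u rho2=%u\n", rho, rho2);          /* prints rho=4 rho2=16 */
            return 0;
    }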
(one more file: +108 -106, diff suppressed because it is too large)
net/ipv4/tcp_ipv4.c (+13 -13)

@@ -866,14 +866,14 @@ static void tcp_v4_reqsk_destructor(struct request_sock *req)
 }
 
 /*
- * Return 1 if a syncookie should be sent
+ * Return true if a syncookie should be sent
  */
-int tcp_syn_flood_action(struct sock *sk,
+bool tcp_syn_flood_action(struct sock *sk,
 			 const struct sk_buff *skb,
 			 const char *proto)
 {
 	const char *msg = "Dropping request";
-	int want_cookie = 0;
+	bool want_cookie = false;
 	struct listen_sock *lopt;
 
 
@@ -881,7 +881,7 @@ int tcp_syn_flood_action(struct sock *sk,
 #ifdef CONFIG_SYN_COOKIES
 	if (sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
-		want_cookie = 1;
+		want_cookie = true;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
 #endif
@@ -1196,7 +1196,7 @@ clear_hash_noput:
 }
 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
 
-static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
+static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 {
 	/*
 	 * This gets called for each TCP segment that arrives
@@ -1219,16 +1219,16 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
 	/* We've parsed the options - do we have a hash? */
 	if (!hash_expected && !hash_location)
-		return 0;
+		return false;
 
 	if (hash_expected && !hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
-		return 1;
+		return true;
 	}
 
 	if (!hash_expected && hash_location) {
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
-		return 1;
+		return true;
 	}
 
 	/* Okay, so this is hash_expected and hash_location -
@@ -1244,9 +1244,9 @@ static int tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 			     &iph->daddr, ntohs(th->dest),
 			     genhash ? " tcp_v4_calc_md5_hash failed"
 			     : "");
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
 #endif
@@ -1280,7 +1280,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	__be32 saddr = ip_hdr(skb)->saddr;
 	__be32 daddr = ip_hdr(skb)->daddr;
 	__u32 isn = TCP_SKB_CB(skb)->when;
-	int want_cookie = 0;
+	bool want_cookie = false;
 
 	/* Never answer to SYNs send to broadcast or multicast */
 	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
@@ -1339,7 +1339,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 			while (l-- > 0)
 				*c++ ^= *hash_location++;
 
-			want_cookie = 0;	/* not our kind of cookie */
+			want_cookie = false;	/* not our kind of cookie */
 			tmp_ext.cookie_out_never = 0; /* false */
 			tmp_ext.cookie_plus = tmp_opt.cookie_plus;
 		} else if (!tp->rx_opt.cookie_in_always) {
@@ -2073,7 +2073,7 @@ static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
 	return rc;
 }
 
-static inline int empty_bucket(struct tcp_iter_state *st)
+static inline bool empty_bucket(struct tcp_iter_state *st)
 {
 	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
 		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
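The tcp_v4_inbound_md5_hash() hunks make the drop/accept contract easier to read: after the conversion the function returns true exactly when the segment should be dropped. Reduced to its three inputs (the helper name and standalone form are illustrative only, not kernel API):

    #include <stdbool.h>

    /* Decision structure of tcp_v4_inbound_md5_hash() after the patch;
     * true means "drop the segment". */
    static bool md5_check_fails(bool hash_expected, bool hash_present,
                                bool hash_matches)
    {
            if (!hash_expected && !hash_present)
                    return false;   /* no MD5 in play on either side: accept */
            if (hash_expected != hash_present)
                    return true;    /* one side has MD5, the other does not: drop */
            return !hash_matches;   /* both have it: drop on mismatch */
    }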
net/ipv4/tcp_minisocks.c (+12 -12)

@@ -55,7 +55,7 @@ EXPORT_SYMBOL_GPL(tcp_death_row);
  * state.
  */
 
-static int tcp_remember_stamp(struct sock *sk)
+static bool tcp_remember_stamp(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -72,13 +72,13 @@ static int tcp_remember_stamp(struct sock *sk)
 		}
 		if (release_it)
 			inet_putpeer(peer);
-		return 1;
+		return true;
 	}
 
-	return 0;
+	return false;
 }
 
-static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+static bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 {
 	struct sock *sk = (struct sock *) tw;
 	struct inet_peer *peer;
@@ -94,17 +94,17 @@ static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
 			peer->tcp_ts = tcptw->tw_ts_recent;
 		}
 		inet_putpeer(peer);
-		return 1;
+		return true;
 	}
-	return 0;
+	return false;
 }
 
-static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
+static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
 	if (seq == s_win)
-		return 1;
+		return true;
 	if (after(end_seq, s_win) && before(seq, e_win))
-		return 1;
+		return true;
 	return seq == e_win && seq == end_seq;
 }
 
@@ -143,7 +143,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	struct tcp_options_received tmp_opt;
 	const u8 *hash_location;
 	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
@@ -316,7 +316,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 	struct inet_timewait_sock *tw = NULL;
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
-	int recycle_ok = 0;
+	bool recycle_ok = false;
 
 	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
 		recycle_ok = tcp_remember_stamp(sk);
@@ -575,7 +575,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	struct sock *child;
 	const struct tcphdr *th = tcp_hdr(skb);
 	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
-	int paws_reject = 0;
+	bool paws_reject = false;
 
 	tmp_opt.saw_tstamp = 0;
 	if (th->doff > (sizeof(struct tcphdr)>>2)) {
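tcp_in_window() above is a good example of why the conversion is safe: the function was already a pure predicate, accepting any segment whose [seq, end_seq] range overlaps the window [s_win, e_win), including the zero-length edge cases. The same function, made standalone with the usual modulo-2^32 sequence comparisons (before() and the after() macro are defined this way in include/net/tcp.h; the fixed-width userspace types stand in for the kernel's u32/s32):

    #include <stdbool.h>
    #include <stdint.h>

    /* Wrap-safe sequence comparison: seq1 is "before" seq2 modulo 2^32. */
    static inline bool before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }
    #define after(seq2, seq1)       before(seq1, seq2)

    static bool tcp_in_window(uint32_t seq, uint32_t end_seq,
                              uint32_t s_win, uint32_t e_win)
    {
            if (seq == s_win)
                    return true;
            if (after(end_seq, s_win) && before(seq, e_win))
                    return true;
            return seq == e_win && seq == end_seq;
    }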
net/ipv4/tcp_output.c (+38 -37)

@@ -370,7 +370,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	TCP_SKB_CB(skb)->end_seq = seq;
 }
 
-static inline int tcp_urg_mode(const struct tcp_sock *tp)
+static inline bool tcp_urg_mode(const struct tcp_sock *tp)
 {
 	return tp->snd_una != tp->snd_up;
 }
@@ -1391,20 +1391,20 @@ static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Minshall's variant of the Nagle send check. */
-static inline int tcp_minshall_check(const struct tcp_sock *tp)
+static inline bool tcp_minshall_check(const struct tcp_sock *tp)
 {
 	return after(tp->snd_sml, tp->snd_una) &&
 		!after(tp->snd_sml, tp->snd_nxt);
 }
 
-/* Return 0, if packet can be sent now without violation Nagle's rules:
+/* Return false, if packet can be sent now without violation Nagle's rules:
  * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_CORK is not set, and TCP_NODELAY is set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */
-static inline int tcp_nagle_check(const struct tcp_sock *tp,
+static inline bool tcp_nagle_check(const struct tcp_sock *tp,
 				  const struct sk_buff *skb,
 				  unsigned int mss_now, int nonagle)
 {
@@ -1413,11 +1413,11 @@ static inline int tcp_nagle_check(const struct tcp_sock *tp,
 		(!nonagle && tp->packets_out && tcp_minshall_check(tp)));
 }
 
-/* Return non-zero if the Nagle test allows this packet to be
+/* Return true if the Nagle test allows this packet to be
  * sent now.
 */
-static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				 unsigned int cur_mss, int nonagle)
+static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb,
+				  unsigned int cur_mss, int nonagle)
 {
 	/* Nagle rule does not apply to frames, which sit in the middle of the
 	 * write_queue (they have no chances to get new data).
@@ -1426,24 +1426,25 @@ static inline int tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff
 	 * argument based upon the location of SKB in the send queue.
 	 */
 	if (nonagle & TCP_NAGLE_PUSH)
-		return 1;
+		return true;
 
 	/* Don't use the nagle rule for urgent data (or for the final FIN).
 	 * Nagle can be ignored during F-RTO too (see RFC4138).
 	 */
 	if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
 	    (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN))
-		return 1;
+		return true;
 
 	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
-		return 1;
+		return true;
 
-	return 0;
+	return false;
 }
 
 /* Does at least the first segment of SKB fit into the send window? */
-static inline int tcp_snd_wnd_test(const struct tcp_sock *tp, const struct sk_buff *skb,
-				   unsigned int cur_mss)
+static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
+			     const struct sk_buff *skb,
+			     unsigned int cur_mss)
 {
 	u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
@@ -1476,7 +1477,7 @@ static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
 }
 
 /* Test if sending is allowed right now. */
-int tcp_may_send_now(struct sock *sk)
+bool tcp_may_send_now(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = tcp_send_head(sk);
@@ -1546,7 +1547,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 *
 * This algorithm is from John Heffner.
 */
-static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
+static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -1606,11 +1607,11 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	/* Ok, it looks like it is advisable to defer. */
 	tp->tso_deferred = 1 | (jiffies << 1);
 
-	return 1;
+	return true;
 
 send_now:
 	tp->tso_deferred = 0;
-	return 0;
+	return false;
 }
 
 /* Create a new MTU probe if we are ready.
@@ -1752,11 +1753,11 @@ static int tcp_mtu_probe(struct sock *sk)
 * snd_up-64k-mss .. snd_up cannot be large. However, taking into
 * account rare use of URG, this is not a big flaw.
 *
-* Returns 1, if no segments are in flight and we have queued segments, but
-* cannot send anything now because of SWS or another problem.
+* Returns true, if no segments are in flight and we have queued segments,
+* but cannot send anything now because of SWS or another problem.
 */
-static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
-			  int push_one, gfp_t gfp)
+static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
+			   int push_one, gfp_t gfp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
@@ -1770,7 +1771,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	/* Do MTU probing. */
 	result = tcp_mtu_probe(sk);
 	if (!result) {
-		return 0;
+		return false;
 	} else if (result > 0) {
 		sent_pkts = 1;
 	}
@@ -1829,7 +1830,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
 	if (likely(sent_pkts)) {
 		tcp_cwnd_validate(sk);
-		return 0;
+		return false;
 	}
 	return !tp->packets_out && tcp_send_head(sk);
 }
@@ -2028,22 +2029,22 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 }
 
 /* Check if coalescing SKBs is legal. */
-static int tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
+static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb)
 {
 	if (tcp_skb_pcount(skb) > 1)
-		return 0;
+		return false;
 	/* TODO: SACK collapsing could be used to remove this condition */
 	if (skb_shinfo(skb)->nr_frags != 0)
-		return 0;
+		return false;
 	if (skb_cloned(skb))
-		return 0;
+		return false;
 	if (skb == tcp_send_head(sk))
-		return 0;
+		return false;
 	/* Some heurestics for collapsing over SACK'd could be invented */
 	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* Collapse packets in the retransmit queue to make to create
@@ -2054,7 +2055,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb = to, *tmp;
-	int first = 1;
+	bool first = true;
 
 	if (!sysctl_tcp_retrans_collapse)
 		return;
@@ -2068,7 +2069,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 		space -= skb->len;
 
 		if (first) {
-			first = 0;
+			first = false;
 			continue;
 		}
 
@@ -2208,18 +2209,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 /* Check if we forward retransmits are possible in the current
  * window/congestion state.
 */
-static int tcp_can_forward_retransmit(struct sock *sk)
+static bool tcp_can_forward_retransmit(struct sock *sk)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Forward retransmissions are possible only during Recovery. */
 	if (icsk->icsk_ca_state != TCP_CA_Recovery)
-		return 0;
+		return false;
 
 	/* No forward retransmissions in Reno are possible. */
 	if (tcp_is_reno(tp))
-		return 0;
+		return false;
 
 	/* Yeah, we have to make difficult choice between forward transmission
 	 * and retransmission... Both ways have their merits...
@@ -2230,9 +2231,9 @@ static int tcp_can_forward_retransmit(struct sock *sk)
 	 */
 
 	if (tcp_may_send_now(sk))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /* This gets called after a retransmit timeout, and the initially
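The rewritten Nagle comment above ("Return false, if packet can be sent now...") enumerates the send-now conditions; tcp_nagle_check() is their negation, deferring only a sub-MSS segment that is corked or still covered by Nagle/Minshall. A standalone sketch with the socket and skb fields lifted into parameters (the helper name nagle_defers and the flat parameter list are illustrative, not kernel API; TCP_NAGLE_CORK carries its value from include/net/tcp.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define TCP_NAGLE_CORK  2       /* socket is corked */

    /* true = hold the segment back, mirroring tcp_nagle_check() after
     * this patch: defer a sub-MSS segment when corked, or when plain
     * Nagle applies and small packets are still unacknowledged. */
    static bool nagle_defers(uint32_t skb_len, uint32_t mss_now,
                             int nonagle, uint32_t packets_out,
                             bool minshall_unacked)
    {
            return skb_len < mss_now &&
                   ((nonagle & TCP_NAGLE_CORK) ||
                    (!nonagle && packets_out && minshall_unacked));
    }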