Mirror of https://github.com/armbian/linux.git (synced 2026-01-06 10:13:00 -08:00)
Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
* master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6: (27 commits)
  [Bluetooth] Add RFCOMM role switch support
  [Bluetooth] Allow disabling of credit based flow control
  [Bluetooth] Small cleanup of the L2CAP source code
  [Bluetooth] Use real devices for host controllers
  [Bluetooth] Add platform device for virtual and serial devices
  [Bluetooth] Add automatic sniff mode support
  [Bluetooth] Correct SCO buffer size on request
  [Bluetooth] Add suspend/resume support to the HCI USB driver
  [Bluetooth] Use raw mode for the Frontline sniffer device
  [BRIDGE]: br_dump_ifinfo index fix
  [ATM]: add+use poison defines
  [NET]: add+use poison defines
  [IOAT]: fix kernel-doc in source files
  [IOAT]: fix header file kernel-doc
  [TG3]: Add ipv6 TSO feature
  [IPV6]: Fix ipv6 GSO payload length
  [TIPC] Fixed sk_buff panic caused by tipc_link_bundle_buf (REVISED)
  [NET]: Verify gso_type too in gso_segment
  [IPVS]: Add sysctl documentation
  [ROSE]: Try all routes when establishing a ROSE connections.
  ...
Documentation/networking/ipvs-sysctl.txt (new file, 143 lines)
@@ -0,0 +1,143 @@
/proc/sys/net/ipv4/vs/* Variables:

am_droprate - INTEGER
        default 10

        It sets the always mode drop rate, which is used in mode 3
        of the drop_rate defense.

amemthresh - INTEGER
        default 1024

        It sets the available memory threshold (in pages), which is
        used in the automatic modes of defense. When there is not
        enough available memory, the respective strategy will be
        enabled and the variable is automatically set to 2, otherwise
        the strategy is disabled and the variable is set to 1.

cache_bypass - BOOLEAN
        0 - disabled (default)
        not 0 - enabled

        If it is enabled, forward packets to the original destination
        directly when no cache server is available and the destination
        address is not local (iph->daddr is RTN_UNICAST). It is mostly
        used in transparent web cache clusters.

debug_level - INTEGER
        0          - transmission error messages (default)
        1          - non-fatal error messages
        2          - configuration
        3          - destination trash
        4          - drop entry
        5          - service lookup
        6          - scheduling
        7          - connection new/expire, lookup and synchronization
        8          - state transition
        9          - binding destination, template checks and applications
        10         - IPVS packet transmission
        11         - IPVS packet handling (ip_vs_in/ip_vs_out)
        12 or more - packet traversal

        Only available when IPVS is compiled with CONFIG_IPVS_DEBUG enabled.

        Higher debugging levels include the messages for lower debugging
        levels, so setting debug level 2 includes level 0, 1 and 2
        messages. Thus, logging becomes more and more verbose the higher
        the level.

drop_entry - INTEGER
        0 - disabled (default)

        The drop_entry defense is to randomly drop entries in the
        connection hash table, just in order to collect back some
        memory for new connections. In the current code, the
        drop_entry procedure can be activated every second; it then
        randomly scans 1/32 of the whole table and drops entries that
        are in the SYN-RECV/SYNACK state, which should be effective
        against SYN-flooding attacks.

        The valid values of drop_entry are from 0 to 3, where 0 means
        that this strategy is always disabled, 1 and 2 mean automatic
        modes (when there is not enough available memory, the strategy
        is enabled and the variable is automatically set to 2,
        otherwise the strategy is disabled and the variable is set to
        1), and 3 means that the strategy is always enabled.

drop_packet - INTEGER
        0 - disabled (default)

        The drop_packet defense is designed to drop 1/rate packets
        before forwarding them to real servers. If the rate is 1, then
        all incoming packets are dropped.

        The value definition is the same as that of drop_entry. In
        the automatic mode, the rate is determined by the following
        formula: rate = amemthresh / (amemthresh - available_memory)
        when available memory is less than the available memory
        threshold. When mode 3 is set, the always mode drop rate
        is controlled by /proc/sys/net/ipv4/vs/am_droprate.

expire_nodest_conn - BOOLEAN
        0 - disabled (default)
        not 0 - enabled

        The default value is 0: the load balancer will silently drop
        packets when its destination server is not available. This may
        be useful when a user-space monitoring program deletes the
        destination server (because of server overload or wrong
        detection) and adds the server back later, so that connections
        to the server can continue.

        If this feature is enabled, the load balancer will expire the
        connection immediately when a packet arrives and its
        destination server is not available; the client program will
        then be notified that the connection is closed. This is
        equivalent to the feature some people require: flushing
        connections whose destination is not available.

expire_quiescent_template - BOOLEAN
        0 - disabled (default)
        not 0 - enabled

        When set to a non-zero value, the load balancer will expire
        persistent templates when the destination server is quiescent.
        This may be useful when a user makes a destination server
        quiescent by setting its weight to 0 and it is desired that
        subsequent otherwise persistent connections are sent to a
        different destination server. By default new persistent
        connections are allowed to quiescent destination servers.

        If this feature is enabled, the load balancer will expire the
        persistence template if it is to be used to schedule a new
        connection and the destination server is quiescent.

nat_icmp_send - BOOLEAN
        0 - disabled (default)
        not 0 - enabled

        It controls sending ICMP error messages (ICMP_DEST_UNREACH)
        for VS/NAT when the load balancer receives packets from real
        servers but the connection entries don't exist.

secure_tcp - INTEGER
        0 - disabled (default)

        The secure_tcp defense is to use a more complicated TCP state
        transition table and some possibly shorter timeouts for each
        state. In VS/NAT, it delays entering the ESTABLISHED state
        until the real server starts to send data and an ACK packet
        (after the 3-way handshake).

        The value definition is the same as that of drop_entry or
        drop_packet.

sync_threshold - INTEGER
        default 3

        It sets the synchronization threshold, which is the minimum
        number of incoming packets that a connection needs to receive
        before the connection will be synchronized. A connection will
        be synchronized every time the number of its incoming packets
        modulus 50 equals the threshold. The range of the threshold is
        from 0 to 49.
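As a quick illustration of the drop_packet formula above (the numbers here are assumed for the example, not taken from the patch): with amemthresh at its default of 1024 pages and 768 pages of memory available, rate = 1024 / (1024 - 768) = 4, so roughly one packet in four is dropped while free memory stays below the threshold. The same kind of reading applies to sync_threshold: with the default of 3, a connection is synchronized on its 3rd, 53rd, 103rd, ... incoming packet. The small sketch below just evaluates the formula in plain C.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only: the amemthresh default and an assumed
	 * amount of available memory below the threshold. */
	int amemthresh = 1024;
	int available_memory = 768;

	/* Automatic-mode drop rate from the documentation above; only
	 * meaningful while available_memory < amemthresh. */
	int rate = amemthresh / (amemthresh - available_memory);

	printf("drop 1 packet in %d\n", rate);	/* prints: drop 1 packet in 4 */
	return 0;
}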
@@ -31,6 +31,7 @@
 #include <linux/atmdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/poison.h>

 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -1995,7 +1996,7 @@ static int __devinit ucode_init (loader_block * lb, amb_dev * dev) {
       }
       i += 1;
     }
-    if (*pointer == 0xdeadbeef) {
+    if (*pointer == ATM_POISON) {
       return loader_start (lb, dev, ucode_start);
     } else {
       // cast needed as there is no %? for pointer differnces
@@ -35,6 +35,7 @@ static char const rcsid[] =

 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/poison.h>
 #include <linux/skbuff.h>
 #include <linux/kernel.h>
 #include <linux/vmalloc.h>
@@ -3657,7 +3658,7 @@ probe_sram(struct idt77252_dev *card)
 	writel(SAR_CMD_WRITE_SRAM | (0 << 2), SAR_REG_CMD);

 	for (addr = 0x4000; addr < 0x80000; addr += 0x4000) {
-		writel(0xdeadbeef, SAR_REG_DR0);
+		writel(ATM_POISON, SAR_REG_DR0);
 		writel(SAR_CMD_WRITE_SRAM | (addr << 2), SAR_REG_CMD);

 		writel(SAR_CMD_READ_SRAM | (0 << 2), SAR_REG_CMD);
@@ -739,6 +739,7 @@ static int bluecard_open(bluecard_info_t *info)

 	hdev->type = HCI_PCCARD;
 	hdev->driver_data = info;
+	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

 	hdev->open = bluecard_hci_open;
 	hdev->close = bluecard_hci_close;
@@ -582,6 +582,7 @@ static int bt3c_open(bt3c_info_t *info)

 	hdev->type = HCI_PCCARD;
 	hdev->driver_data = info;
+	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

 	hdev->open = bt3c_hci_open;
 	hdev->close = bt3c_hci_close;
@@ -502,6 +502,7 @@ static int btuart_open(btuart_info_t *info)

 	hdev->type = HCI_PCCARD;
 	hdev->driver_data = info;
+	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

 	hdev->open = btuart_hci_open;
 	hdev->close = btuart_hci_close;
@@ -484,6 +484,7 @@ static int dtl1_open(dtl1_info_t *info)

 	hdev->type = HCI_PCCARD;
 	hdev->driver_data = info;
+	SET_HCIDEV_DEV(hdev, &info->p_dev->dev);

 	hdev->open = dtl1_hci_open;
 	hdev->close = dtl1_hci_close;
@@ -122,6 +122,9 @@ static struct usb_device_id blacklist_ids[] = {
 	/* RTX Telecom based adapter with buggy SCO support */
 	{ USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },

+	/* Belkin F8T012 */
+	{ USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
+
 	/* Digianswer devices */
 	{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
 	{ USB_DEVICE(0x08fd, 0x0002), .driver_info = HCI_IGNORE },
@@ -129,6 +132,9 @@ static struct usb_device_id blacklist_ids[] = {
 	/* CSR BlueCore Bluetooth Sniffer */
 	{ USB_DEVICE(0x0a12, 0x0002), .driver_info = HCI_SNIFFER },

+	/* Frontline ComProbe Bluetooth Sniffer */
+	{ USB_DEVICE(0x16d3, 0x0002), .driver_info = HCI_SNIFFER },
+
 	{ }	/* Terminating entry */
 };

@@ -984,6 +990,9 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
 	if (reset || id->driver_info & HCI_RESET)
 		set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);

+	if (id->driver_info & HCI_WRONG_SCO_MTU)
+		set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
+
 	if (id->driver_info & HCI_SNIFFER) {
 		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
 			set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
@@ -1042,10 +1051,81 @@ static void hci_usb_disconnect(struct usb_interface *intf)
 	hci_free_dev(hdev);
 }

+static int hci_usb_suspend(struct usb_interface *intf, pm_message_t message)
+{
+	struct hci_usb *husb = usb_get_intfdata(intf);
+	struct list_head killed;
+	unsigned long flags;
+	int i;
+
+	if (!husb || intf == husb->isoc_iface)
+		return 0;
+
+	hci_suspend_dev(husb->hdev);
+
+	INIT_LIST_HEAD(&killed);
+
+	for (i = 0; i < 4; i++) {
+		struct _urb_queue *q = &husb->pending_q[i];
+		struct _urb *_urb, *_tmp;
+
+		while ((_urb = _urb_dequeue(q))) {
+			/* reset queue since _urb_dequeue sets it to NULL */
+			_urb->queue = q;
+			usb_kill_urb(&_urb->urb);
+			list_add(&_urb->list, &killed);
+		}
+
+		spin_lock_irqsave(&q->lock, flags);
+
+		list_for_each_entry_safe(_urb, _tmp, &killed, list) {
+			list_move_tail(&_urb->list, &q->head);
+		}
+
+		spin_unlock_irqrestore(&q->lock, flags);
+	}
+
+	return 0;
+}
+
+static int hci_usb_resume(struct usb_interface *intf)
+{
+	struct hci_usb *husb = usb_get_intfdata(intf);
+	unsigned long flags;
+	int i, err = 0;
+
+	if (!husb || intf == husb->isoc_iface)
+		return 0;
+
+	for (i = 0; i < 4; i++) {
+		struct _urb_queue *q = &husb->pending_q[i];
+		struct _urb *_urb;
+
+		spin_lock_irqsave(&q->lock, flags);
+
+		list_for_each_entry(_urb, &q->head, list) {
+			err = usb_submit_urb(&_urb->urb, GFP_ATOMIC);
+			if (err)
+				break;
+		}
+
+		spin_unlock_irqrestore(&q->lock, flags);
+
+		if (err)
+			return -EIO;
+	}
+
+	hci_resume_dev(husb->hdev);
+
+	return 0;
+}
+
 static struct usb_driver hci_usb_driver = {
 	.name		= "hci_usb",
 	.probe		= hci_usb_probe,
 	.disconnect	= hci_usb_disconnect,
+	.suspend	= hci_usb_suspend,
+	.resume		= hci_usb_resume,
 	.id_table	= bluetooth_ids,
 };

@@ -35,6 +35,7 @@
 #define HCI_SNIFFER		0x10
 #define HCI_BCM92035		0x20
 #define HCI_BROKEN_ISOC		0x40
+#define HCI_WRONG_SCO_MTU	0x80

 #define HCI_MAX_IFACE_NUM	3

@@ -277,7 +277,6 @@ static int vhci_open(struct inode *inode, struct file *file)

 	hdev->type = HCI_VHCI;
 	hdev->driver_data = vhci;
-	SET_HCIDEV_DEV(hdev, vhci_miscdev.dev);

 	hdev->open = vhci_open_dev;
 	hdev->close = vhci_close_dev;
@@ -166,8 +166,8 @@ static struct dma_chan *dma_client_chan_alloc(struct dma_client *client)
 }

 /**
- * dma_client_chan_free - release a DMA channel
- * @chan: &dma_chan
+ * dma_chan_cleanup - release a DMA channel's resources
+ * @kref: kernel reference structure that contains the DMA channel device
  */
 void dma_chan_cleanup(struct kref *kref)
 {
@@ -199,7 +199,7 @@ static void dma_client_chan_free(struct dma_chan *chan)
  * dma_chans_rebalance - reallocate channels to clients
  *
  * When the number of DMA channel in the system changes,
- * channels need to be rebalanced among clients
+ * channels need to be rebalanced among clients.
  */
 static void dma_chans_rebalance(void)
 {
@@ -264,7 +264,7 @@ struct dma_client *dma_async_client_register(dma_event_callback event_callback)

 /**
  * dma_async_client_unregister - unregister a client and free the &dma_client
- * @client:
+ * @client: &dma_client to free
  *
  * Force frees any allocated DMA channels, frees the &dma_client memory
  */
@@ -306,7 +306,7 @@ void dma_async_client_chan_request(struct dma_client *client,
 }

 /**
- * dma_async_device_register -
+ * dma_async_device_register - registers DMA devices found
  * @device: &dma_device
  */
 int dma_async_device_register(struct dma_device *device)
@@ -348,8 +348,8 @@ int dma_async_device_register(struct dma_device *device)
 }

 /**
- * dma_async_device_unregister -
- * @device: &dma_device
+ * dma_async_device_cleanup - function called when all references are released
+ * @kref: kernel reference object
  */
 static void dma_async_device_cleanup(struct kref *kref)
 {
@@ -359,7 +359,11 @@ static void dma_async_device_cleanup(struct kref *kref)
 	complete(&device->done);
 }

-void dma_async_device_unregister(struct dma_device* device)
+/**
+ * dma_async_device_unregister - unregisters DMA devices
+ * @device: &dma_device
+ */
+void dma_async_device_unregister(struct dma_device *device)
 {
 	struct dma_chan *chan;
 	unsigned long flags;

@@ -217,7 +217,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)

 /**
  * do_ioat_dma_memcpy - actual function that initiates a IOAT DMA transaction
- * @chan: IOAT DMA channel handle
+ * @ioat_chan: IOAT DMA channel handle
  * @dest: DMA destination address
  * @src: DMA source address
  * @len: transaction length in bytes
@@ -383,7 +383,7 @@ static dma_cookie_t ioat_dma_memcpy_buf_to_pg(struct dma_chan *chan,
  * @dest_off: offset into that page
  * @src_pg: pointer to the page to copy from
  * @src_off: offset into that page
- * @len: transaction length in bytes. This is guaranteed to not make a copy
+ * @len: transaction length in bytes. This is guaranteed not to make a copy
  * across a page boundary.
  */

@@ -407,7 +407,7 @@ static dma_cookie_t ioat_dma_memcpy_pg_to_pg(struct dma_chan *chan,
 }

 /**
- * ioat_dma_memcpy_issue_pending - push potentially unrecognoized appended descriptors to hw
+ * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended descriptors to hw
  * @chan: DMA channel handle
  */

@@ -510,6 +510,8 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *chan)
  * ioat_dma_is_complete - poll the status of a IOAT DMA transaction
  * @chan: IOAT DMA channel handle
  * @cookie: DMA transaction identifier
+ * @done: if not %NULL, updated with last completed transaction
+ * @used: if not %NULL, updated with last used transaction
  */

 static enum dma_status ioat_dma_is_complete(struct dma_chan *chan,
@@ -826,7 +828,7 @@ static int __init ioat_init_module(void)
 	/* if forced, worst case is that rmmod hangs */
 	__unsafe(THIS_MODULE);

-	pci_module_init(&ioat_pci_drv);
+	return pci_module_init(&ioat_pci_drv);
 }

 module_init(ioat_init_module);
@@ -76,7 +76,7 @@
 #define IOAT_CHANSTS_OFFSET			0x04	/* 64-bit Channel Status Register */
 #define IOAT_CHANSTS_OFFSET_LOW			0x04
 #define IOAT_CHANSTS_OFFSET_HIGH		0x08
-#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	0xFFFFFFFFFFFFFFC0
+#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR	0xFFFFFFFFFFFFFFC0UL
 #define IOAT_CHANSTS_SOFT_ERR			0x0000000000000010
 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS	0x0000000000000007
 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE	0x0
@@ -31,7 +31,7 @@
 #include <asm/io.h>
 #include <asm/uaccess.h>

-int num_pages_spanned(struct iovec *iov)
+static int num_pages_spanned(struct iovec *iov)
 {
 	return
 	((PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
@@ -68,8 +68,8 @@

 #define DRV_MODULE_NAME		"tg3"
 #define PFX DRV_MODULE_NAME	": "
-#define DRV_MODULE_VERSION	"3.61"
-#define DRV_MODULE_RELDATE	"June 29, 2006"
+#define DRV_MODULE_VERSION	"3.62"
+#define DRV_MODULE_RELDATE	"June 30, 2006"

 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -3798,18 +3798,24 @@ static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			goto out_unlock;
 		}

-		tcp_opt_len = ((skb->h.th->doff - 5) * 4);
-		ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+			mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
+		else {
+			tcp_opt_len = ((skb->h.th->doff - 5) * 4);
+			ip_tcp_len = (skb->nh.iph->ihl * 4) +
+				     sizeof(struct tcphdr);
+
+			skb->nh.iph->check = 0;
+			skb->nh.iph->tot_len = htons(mss + ip_tcp_len +
+						     tcp_opt_len);
+			mss |= (ip_tcp_len + tcp_opt_len) << 9;
+		}

 		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
 			       TXD_FLAG_CPU_POST_DMA);

-		skb->nh.iph->check = 0;
-		skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
-
 		skb->h.th->check = 0;

-		mss |= (ip_tcp_len + tcp_opt_len) << 9;
 	}
 	else if (skb->ip_summed == CHECKSUM_HW)
 		base_flags |= TXD_FLAG_TCPUDP_CSUM;
@@ -7887,6 +7893,12 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
 			return -EINVAL;
 		return 0;
 	}
+	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) {
+		if (value)
+			dev->features |= NETIF_F_TSO6;
+		else
+			dev->features &= ~NETIF_F_TSO6;
+	}
 	return ethtool_op_set_tso(dev, value);
 }
 #endif
@@ -11507,8 +11519,11 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	 * Firmware TSO on older chips gives lower performance, so it
 	 * is off by default, but can be enabled using ethtool.
 	 */
-	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
+	if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
 		dev->features |= NETIF_F_TSO;
+		if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
+			dev->features |= NETIF_F_TSO6;
+	}

 #endif

@@ -44,7 +44,7 @@ enum dma_event {
 };

 /**
- * typedef dma_cookie_t
+ * typedef dma_cookie_t - an opaque DMA cookie
  *
  * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
  */
@@ -80,14 +80,14 @@ struct dma_chan_percpu {

 /**
  * struct dma_chan - devices supply DMA channels, clients use them
- * @client: ptr to the client user of this chan, will be NULL when unused
- * @device: ptr to the dma device who supplies this channel, always !NULL
+ * @client: ptr to the client user of this chan, will be %NULL when unused
+ * @device: ptr to the dma device who supplies this channel, always !%NULL
  * @cookie: last cookie value returned to client
- * @chan_id:
- * @class_dev:
+ * @chan_id: channel ID for sysfs
+ * @class_dev: class device for sysfs
  * @refcount: kref, used in "bigref" slow-mode
- * @slow_ref:
- * @rcu:
+ * @slow_ref: indicates that the DMA channel is free
+ * @rcu: the DMA channel's RCU head
  * @client_node: used to add this to the client chan list
  * @device_node: used to add this to the device chan list
  * @local: per-cpu pointer to a struct dma_chan_percpu
@@ -162,10 +162,17 @@ struct dma_client {
  * @chancnt: how many DMA channels are supported
  * @channels: the list of struct dma_chan
  * @global_node: list_head for global dma_device_list
- * @refcount:
- * @done:
- * @dev_id:
- * Other func ptrs: used to make use of this device's capabilities
+ * @refcount: reference count
+ * @done: IO completion struct
+ * @dev_id: unique device ID
+ * @device_alloc_chan_resources: allocate resources and return the
+ *	number of allocated descriptors
+ * @device_free_chan_resources: release DMA channel's resources
+ * @device_memcpy_buf_to_buf: memcpy buf pointer to buf pointer
+ * @device_memcpy_buf_to_pg: memcpy buf pointer to struct page
+ * @device_memcpy_pg_to_pg: memcpy struct page/offset to struct page/offset
+ * @device_memcpy_complete: poll the status of an IOAT DMA transaction
+ * @device_memcpy_issue_pending: push appended descriptors to hardware
  */
 struct dma_device {

@@ -211,7 +218,7 @@ void dma_async_client_chan_request(struct dma_client *client,
  * Both @dest and @src must be mappable to a bus address according to the
  * DMA mapping API rules for streaming mappings.
  * Both @dest and @src must stay memory resident (kernel memory or locked
- * user space pages)
+ * user space pages).
  */
 static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 	void *dest, void *src, size_t len)
@@ -225,7 +232,7 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
 }

 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
  * @chan: DMA channel to offload copy to
  * @page: destination page
  * @offset: offset in page to copy to
@@ -250,18 +257,18 @@ static inline dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
 }

 /**
- * dma_async_memcpy_buf_to_pg - offloaded copy
+ * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
  * @chan: DMA channel to offload copy to
- * @dest_page: destination page
+ * @dest_pg: destination page
 * @dest_off: offset in page to copy to
- * @src_page: source page
+ * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
- * (kernel memory or locked user space pages)
+ * (kernel memory or locked user space pages).
 */
 static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
 	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
@@ -278,7 +285,7 @@ static inline dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,

 /**
  * dma_async_memcpy_issue_pending - flush pending copies to HW
- * @chan:
+ * @chan: target DMA channel
  *
  * This allows drivers to push copies to HW in batches,
  * reducing MMIO writes where possible.
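The kernel-doc fixes above spell out the client-side memcpy offload entry points; below is a minimal, hedged sketch of how a client might drive them. It assumes the caller already holds a struct dma_chan * obtained through the client registration path (dma_async_client_register() / dma_async_client_chan_request()), and it omits completion polling and fallback handling. It is an illustration only, not part of the patch.

#include <linux/dmaengine.h>

/* Sketch: queue one offloaded copy on a channel this client already owns. */
static int queue_offload_copy(struct dma_chan *chan, void *dst, void *src, size_t len)
{
	/* Both buffers must stay memory resident, per the kernel-doc above. */
	dma_cookie_t cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);

	if (cookie < 0)
		return cookie;	/* <0 is an error code; fall back to memcpy() */

	/* Push any batched descriptors out to the hardware. */
	dma_async_memcpy_issue_pending(chan);

	return 0;
}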
@@ -44,6 +44,11 @@

+/********** drivers/atm/ **********/
+#define ATM_POISON_FREE		0x12
+#define ATM_POISON		0xdeadbeef
+
+/********** net/ **********/
+#define NEIGHBOR_DEAD		0xdeadbeef
+#define NETFILTER_LINK_POISON	0xdead57ac
+
 /********** kernel/mutexes **********/
 #define MUTEX_DEBUG_INIT	0x11

@@ -182,14 +182,26 @@ typedef struct {

 typedef struct ax25_route {
 	struct ax25_route	*next;
-	atomic_t		ref;
+	atomic_t		refcount;
 	ax25_address		callsign;
 	struct net_device	*dev;
 	ax25_digi		*digipeat;
 	char			ip_mode;
-	struct timer_list	timer;
 } ax25_route;

+static inline void ax25_hold_route(ax25_route *ax25_rt)
+{
+	atomic_inc(&ax25_rt->refcount);
+}
+
+extern void __ax25_put_route(ax25_route *ax25_rt);
+
+static inline void ax25_put_route(ax25_route *ax25_rt)
+{
+	if (atomic_dec_and_test(&ax25_rt->refcount))
+		__ax25_put_route(ax25_rt);
+}
+
 typedef struct {
 	char			slave;			/* slave_mode?   */
 	struct timer_list	slave_timer;		/* timeout timer */
@@ -348,17 +360,11 @@ extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
 extern void ax25_rt_device_down(struct net_device *);
 extern int ax25_rt_ioctl(unsigned int, void __user *);
 extern struct file_operations ax25_route_fops;
+extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
 extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern ax25_route *ax25_rt_find_route(ax25_route *, ax25_address *,
-	struct net_device *);
 extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
 extern void ax25_rt_free(void);

-static inline void ax25_put_route(ax25_route *ax25_rt)
-{
-	atomic_dec(&ax25_rt->ref);
-}
-
 /* ax25_std_in.c */
 extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);

@@ -175,6 +175,6 @@ extern int hci_sock_cleanup(void);
 extern int bt_sysfs_init(void);
 extern void bt_sysfs_cleanup(void);

-extern struct class bt_class;
+extern struct class *bt_class;

 #endif /* __BLUETOOTH_H */
@@ -54,7 +54,8 @@
 /* HCI device quirks */
 enum {
 	HCI_QUIRK_RESET_ON_INIT,
-	HCI_QUIRK_RAW_DEVICE
+	HCI_QUIRK_RAW_DEVICE,
+	HCI_QUIRK_FIXUP_BUFFER_SIZE
 };

 /* HCI device flags */
@@ -100,9 +101,10 @@ enum {
 #define HCIINQUIRY	_IOR('H', 240, int)

 /* HCI timeouts */
-#define HCI_CONN_TIMEOUT	(HZ * 40)
-#define HCI_DISCONN_TIMEOUT	(HZ * 2)
-#define HCI_CONN_IDLE_TIMEOUT	(HZ * 60)
+#define HCI_CONNECT_TIMEOUT	(40000)	/* 40 seconds */
+#define HCI_DISCONN_TIMEOUT	(2000)	/* 2 seconds */
+#define HCI_IDLE_TIMEOUT	(6000)	/* 6 seconds */
+#define HCI_INIT_TIMEOUT	(10000)	/* 10 seconds */

 /* HCI Packet types */
 #define HCI_COMMAND_PKT		0x01
@@ -144,7 +146,7 @@ enum {
 #define LMP_TACCURACY	0x10
 #define LMP_RSWITCH	0x20
 #define LMP_HOLD	0x40
-#define LMP_SNIF	0x80
+#define LMP_SNIFF	0x80

 #define LMP_PARK	0x01
 #define LMP_RSSI	0x02
@@ -159,13 +161,21 @@ enum {
 #define LMP_PSCHEME	0x02
 #define LMP_PCONTROL	0x04

+#define LMP_SNIFF_SUBR	0x02
+
+/* Connection modes */
+#define HCI_CM_ACTIVE	0x0000
+#define HCI_CM_HOLD	0x0001
+#define HCI_CM_SNIFF	0x0002
+#define HCI_CM_PARK	0x0003
+
 /* Link policies */
 #define HCI_LP_RSWITCH	0x0001
 #define HCI_LP_HOLD	0x0002
 #define HCI_LP_SNIFF	0x0004
 #define HCI_LP_PARK	0x0008

-/* Link mode */
+/* Link modes */
 #define HCI_LM_ACCEPT	0x8000
 #define HCI_LM_MASTER	0x0001
 #define HCI_LM_AUTH	0x0002
@@ -191,7 +201,7 @@ struct hci_rp_read_loc_version {
 } __attribute__ ((packed));

 #define OCF_READ_LOCAL_FEATURES	0x0003
-struct hci_rp_read_loc_features {
+struct hci_rp_read_local_features {
 	__u8     status;
 	__u8     features[8];
 } __attribute__ ((packed));
@@ -375,17 +385,32 @@ struct hci_cp_change_conn_link_key {
 } __attribute__ ((packed));

 #define OCF_READ_REMOTE_FEATURES	0x001B
-struct hci_cp_read_rmt_features {
+struct hci_cp_read_remote_features {
 	__le16   handle;
 } __attribute__ ((packed));

 #define OCF_READ_REMOTE_VERSION	0x001D
-struct hci_cp_read_rmt_version {
+struct hci_cp_read_remote_version {
 	__le16   handle;
 } __attribute__ ((packed));

 /* Link Policy */
 #define OGF_LINK_POLICY	0x02

+#define OCF_SNIFF_MODE		0x0003
+struct hci_cp_sniff_mode {
+	__le16   handle;
+	__le16   max_interval;
+	__le16   min_interval;
+	__le16   attempt;
+	__le16   timeout;
+} __attribute__ ((packed));
+
+#define OCF_EXIT_SNIFF_MODE	0x0004
+struct hci_cp_exit_sniff_mode {
+	__le16   handle;
+} __attribute__ ((packed));
+
 #define OCF_ROLE_DISCOVERY	0x0009
 struct hci_cp_role_discovery {
 	__le16   handle;
@@ -406,7 +431,7 @@ struct hci_rp_read_link_policy {
 	__le16   policy;
 } __attribute__ ((packed));

 #define OCF_SWITCH_ROLE	0x000B
 struct hci_cp_switch_role {
 	bdaddr_t bdaddr;
 	__u8     role;
@@ -422,6 +447,14 @@ struct hci_rp_write_link_policy {
 	__le16   handle;
 } __attribute__ ((packed));

+#define OCF_SNIFF_SUBRATE	0x0011
+struct hci_cp_sniff_subrate {
+	__le16   handle;
+	__le16   max_latency;
+	__le16   min_remote_timeout;
+	__le16   min_local_timeout;
+} __attribute__ ((packed));
+
 /* Status params */
 #define OGF_STATUS_PARAM	0x05

@@ -581,15 +614,15 @@ struct hci_ev_link_key_notify {
 	__u8     key_type;
 } __attribute__ ((packed));

-#define HCI_EV_RMT_FEATURES	0x0B
-struct hci_ev_rmt_features {
+#define HCI_EV_REMOTE_FEATURES	0x0B
+struct hci_ev_remote_features {
 	__u8     status;
 	__le16   handle;
 	__u8     features[8];
 } __attribute__ ((packed));

-#define HCI_EV_RMT_VERSION	0x0C
-struct hci_ev_rmt_version {
+#define HCI_EV_REMOTE_VERSION	0x0C
+struct hci_ev_remote_version {
 	__u8     status;
 	__le16   handle;
 	__u8     lmp_ver;
@@ -610,6 +643,16 @@ struct hci_ev_pscan_rep_mode {
 	__u8     pscan_rep_mode;
 } __attribute__ ((packed));

+#define HCI_EV_SNIFF_SUBRATE	0x2E
+struct hci_ev_sniff_subrate {
+	__u8     status;
+	__le16   handle;
+	__le16   max_tx_latency;
+	__le16   max_rx_latency;
+	__le16   max_remote_timeout;
+	__le16   max_local_timeout;
+} __attribute__ ((packed));
+
 /* Internal events generated by Bluetooth stack */
 #define HCI_EV_STACK_INTERNAL	0xFD
 struct hci_ev_stack_internal {
Some files were not shown because too many files have changed in this diff.