Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

David S. Miller 2012-01-04 21:35:43 -05:00
commit 117ff42fd4
18 changed files with 99 additions and 86 deletions


@@ -1698,11 +1698,9 @@ F: arch/x86/include/asm/tce.h
 
 CAN NETWORK LAYER
 M: Oliver Hartkopp <socketcan@hartkopp.net>
-M: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
-M: Urs Thuermann <urs.thuermann@volkswagen.de>
 L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
 S: Maintained
 F: net/can/
 F: include/linux/can.h
@@ -1713,9 +1711,10 @@ F: include/linux/can/gw.h
 
 CAN NETWORK DRIVERS
 M: Wolfgang Grandegger <wg@grandegger.com>
+M: Marc Kleine-Budde <mkl@pengutronix.de>
 L: linux-can@vger.kernel.org
-L: netdev@vger.kernel.org
-W: http://developer.berlios.de/projects/socketcan/
+W: http://gitorious.org/linux-can
+T: git git://gitorious.org/linux-can/linux-can-next.git
 S: Maintained
 F: drivers/net/can/
 F: include/linux/can/dev.h


@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*


@@ -2560,7 +2560,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
         rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
         rdev->pm.current_clock_mode_index = 0;
-        rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+        if (rdev->pm.default_power_state_index >= 0)
+                rdev->pm.current_vddc =
+                        rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+        else
+                rdev->pm.current_vddc = 0;
 }
 
 void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)


@@ -23,8 +23,8 @@ if NET_VENDOR_FREESCALE
 config FEC
         tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
         depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
-                   ARCH_MXC || ARCH_MXS)
-        default ARCH_MXC || ARCH_MXS if ARM
+                   ARCH_MXC || SOC_IMX28)
+        default ARCH_MXC || SOC_IMX28 if ARM
         select PHYLIB
         ---help---
           Say Y here if you want to use the built-in 10/100 Fast ethernet


@@ -1797,6 +1797,9 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
         struct ath_softc *sc = hw->priv;
         struct ath_node *an = (struct ath_node *) sta->drv_priv;
 
+        if (!(sc->sc_flags & SC_OP_TXAGGR))
+                return;
+
         switch (cmd) {
         case STA_NOTIFY_SLEEP:
                 an->sleeping = true;


@@ -617,9 +617,19 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
         const char *err_msg = NULL;
         struct b43_rxhdr_fw4 *rxhdr =
                 (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
+        size_t rxhdr_size = sizeof(*rxhdr);
 
         BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
-        memset(rxhdr, 0, sizeof(*rxhdr));
+        switch (dev->fw.hdr_format) {
+        case B43_FW_HDR_410:
+        case B43_FW_HDR_351:
+                rxhdr_size -= sizeof(rxhdr->format_598) -
+                        sizeof(rxhdr->format_351);
+                break;
+        case B43_FW_HDR_598:
+                break;
+        }
+        memset(rxhdr, 0, rxhdr_size);
 
         /* Check if we have data and wait for it to get ready. */
         if (q->rev >= 8) {
@@ -657,11 +667,11 @@ data_ready:
 
         /* Get the preamble (RX header) */
         if (q->rev >= 8) {
-                b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+                b43_block_read(dev, rxhdr, rxhdr_size,
                                q->mmio_base + B43_PIO8_RXDATA,
                                sizeof(u32));
         } else {
-                b43_block_read(dev, rxhdr, sizeof(*rxhdr),
+                b43_block_read(dev, rxhdr, rxhdr_size,
                                q->mmio_base + B43_PIO_RXDATA,
                                sizeof(u16));
         }


@@ -55,9 +55,14 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
 {
         bool cancel_flag = false;
         int status = adapter->cmd_wait_q.status;
-        struct cmd_ctrl_node *cmd_queued = adapter->cmd_queued;
+        struct cmd_ctrl_node *cmd_queued;
+
+        if (!adapter->cmd_queued)
+                return 0;
+
+        cmd_queued = adapter->cmd_queued;
+        adapter->cmd_queued = NULL;
 
         dev_dbg(adapter->dev, "cmd pending\n");
         atomic_inc(&adapter->cmd_pending);


@@ -73,8 +73,6 @@ int rtc_set_time(struct rtc_device *rtc, struct rtc_time *tm)
                 err = -EINVAL;
 
         mutex_unlock(&rtc->ops_lock);
-        /* A timer might have just expired */
-        schedule_work(&rtc->irqwork);
         return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_time);
@@ -114,8 +112,6 @@ int rtc_set_mmss(struct rtc_device *rtc, unsigned long secs)
                 err = -EINVAL;
 
         mutex_unlock(&rtc->ops_lock);
-        /* A timer might have just expired */
-        schedule_work(&rtc->irqwork);
         return err;
 }
@@ -323,20 +319,6 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 }
 EXPORT_SYMBOL_GPL(rtc_read_alarm);
 
-static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
-{
-        int err;
-
-        if (!rtc->ops)
-                err = -ENODEV;
-        else if (!rtc->ops->set_alarm)
-                err = -EINVAL;
-        else
-                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
-        return err;
-}
-
 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 {
         struct rtc_time tm;
@@ -360,7 +342,14 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
          * over right here, before we set the alarm.
          */
-        return ___rtc_set_alarm(rtc, alarm);
+        if (!rtc->ops)
+                err = -ENODEV;
+        else if (!rtc->ops->set_alarm)
+                err = -EINVAL;
+        else
+                err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+        return err;
 }
 
 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
@@ -407,8 +396,6 @@ int rtc_initialize_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                 timerqueue_add(&rtc->timerqueue, &rtc->aie_timer.node);
         }
         mutex_unlock(&rtc->ops_lock);
-        /* maybe that was in the past.*/
-        schedule_work(&rtc->irqwork);
         return err;
 }
 EXPORT_SYMBOL_GPL(rtc_initialize_alarm);
@@ -776,20 +763,6 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
         return 0;
 }
 
-static void rtc_alarm_disable(struct rtc_device *rtc)
-{
-        struct rtc_wkalrm alarm;
-        struct rtc_time tm;
-
-        __rtc_read_time(rtc, &tm);
-
-        alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
-                                     ktime_set(300, 0)));
-        alarm.enabled = 0;
-
-        ___rtc_set_alarm(rtc, &alarm);
-}
-
 /**
  * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
  * @rtc rtc device
@@ -811,10 +784,8 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
                 struct rtc_wkalrm alarm;
                 int err;
                 next = timerqueue_getnext(&rtc->timerqueue);
-                if (!next) {
-                        rtc_alarm_disable(rtc);
+                if (!next)
                         return;
-                }
                 alarm.time = rtc_ktime_to_tm(next->expires);
                 alarm.enabled = 1;
                 err = __rtc_set_alarm(rtc, &alarm);
@@ -876,8 +847,7 @@ again:
                 err = __rtc_set_alarm(rtc, &alarm);
                 if (err == -ETIME)
                         goto again;
-        } else
-                rtc_alarm_disable(rtc);
+        }
 
         mutex_unlock(&rtc->ops_lock);
 }


@@ -282,7 +282,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
         byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
         byte_count += total_in_buf2;
         /* don't allow buffer to overflow */
-        if (byte_count > CIFSMaxBufSize)
+        if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
                 return -ENOBUFS;
 
         pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
@@ -2122,7 +2122,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
                 warned_on_ntlm = true;
                 cERROR(1, "default security mechanism requested. The default "
                         "security mechanism will be upgraded from ntlm to "
-                        "ntlmv2 in kernel release 3.2");
+                        "ntlmv2 in kernel release 3.3");
         }
         ses->overrideSecFlg = volume_info->secFlg;


@@ -263,23 +263,6 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                 goto out_no_root;
         }
 
-        ret = -ENOMEM;
-        s->s_root = d_alloc_root(root_inode);
-        if (!s->s_root)
-                goto out_iput;
-
-        if (!(s->s_flags & MS_RDONLY)) {
-                if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
-                        ms->s_state &= ~MINIX_VALID_FS;
-                mark_buffer_dirty(bh);
-        }
-        if (!(sbi->s_mount_state & MINIX_VALID_FS))
-                printk("MINIX-fs: mounting unchecked file system, "
-                        "running fsck is recommended\n");
-        else if (sbi->s_mount_state & MINIX_ERROR_FS)
-                printk("MINIX-fs: mounting file system with errors, "
-                        "running fsck is recommended\n");
-
         /* Apparently minix can create filesystems that allocate more blocks for
          * the bitmaps than needed. We simply ignore that, but verify it didn't
          * create one with not enough blocks and bail out if so.
@@ -300,6 +283,23 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
                 goto out_iput;
         }
 
+        ret = -ENOMEM;
+        s->s_root = d_alloc_root(root_inode);
+        if (!s->s_root)
+                goto out_iput;
+
+        if (!(s->s_flags & MS_RDONLY)) {
+                if (sbi->s_version != MINIX_V3) /* s_state is now out from V3 sb */
+                        ms->s_state &= ~MINIX_VALID_FS;
+                mark_buffer_dirty(bh);
+        }
+        if (!(sbi->s_mount_state & MINIX_VALID_FS))
+                printk("MINIX-fs: mounting unchecked file system, "
+                        "running fsck is recommended\n");
+        else if (sbi->s_mount_state & MINIX_ERROR_FS)
+                printk("MINIX-fs: mounting file system with errors, "
+                        "running fsck is recommended\n");
+
         return 0;
 
 out_iput:


@@ -2056,7 +2056,7 @@ static inline int security_old_inode_init_security(struct inode *inode,
                                                     char **name, void **value,
                                                     size_t *len)
 {
-        return 0;
+        return -EOPNOTSUPP;
 }
 
 static inline int security_inode_create(struct inode *dir,


@@ -1540,8 +1540,15 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
         }
 
         /* dead body doesn't have much to contribute */
-        if (p->exit_state == EXIT_DEAD)
+        if (unlikely(p->exit_state == EXIT_DEAD)) {
+                /*
+                 * But do not ignore this task until the tracer does
+                 * wait_task_zombie()->do_notify_parent().
+                 */
+                if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+                        wo->notask_error = 0;
                 return 0;
+        }
 
         /* slay zombie? */
         if (p->exit_state == EXIT_ZOMBIE) {


@@ -74,11 +74,17 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
         /*
          * Ensure the task is not frozen.
-         * Also, when a freshly created task is scheduled once, changes
-         * its state to TASK_UNINTERRUPTIBLE without having ever been
-         * switched out once, it musn't be checked.
+         * Also, skip vfork and any other user process that freezer should skip.
          */
-        if (unlikely(t->flags & PF_FROZEN || !switch_count))
+        if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
                 return;
+
+        /*
+         * When a freshly created task is scheduled once, changes its state to
+         * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
+         * musn't be checked.
+         */
+        if (unlikely(!switch_count))
+                return;
 
         if (switch_count != t->last_switch_count) {


@@ -96,9 +96,20 @@ void __ptrace_unlink(struct task_struct *child)
          */
         if (!(child->flags & PF_EXITING) &&
             (child->signal->flags & SIGNAL_STOP_STOPPED ||
-             child->signal->group_stop_count))
+             child->signal->group_stop_count)) {
                 child->jobctl |= JOBCTL_STOP_PENDING;
 
+                /*
+                 * This is only possible if this thread was cloned by the
+                 * traced task running in the stopped group, set the signal
+                 * for the future reports.
+                 * FIXME: we should change ptrace_init_task() to handle this
+                 * case.
+                 */
+                if (!(child->jobctl & JOBCTL_STOP_SIGMASK))
+                        child->jobctl |= SIGSTOP;
+        }
+
         /*
          * If transition to TASK_STOPPED is pending or in TASK_TRACED, kick
          * @child in the butt. Note that @resume should be used iff @child


@@ -1994,8 +1994,6 @@ static bool do_signal_stop(int signr)
                  */
                 if (!(sig->flags & SIGNAL_STOP_STOPPED))
                         sig->group_exit_code = signr;
-                else
-                        WARN_ON_ONCE(!current->ptrace);
 
                 sig->group_stop_count = 0;


@@ -643,7 +643,7 @@ static int hci_dev_do_close(struct hci_dev *hdev)
         if (!test_bit(HCI_RAW, &hdev->flags)) {
                 set_bit(HCI_INIT, &hdev->flags);
                 __hci_request(hdev, hci_reset_req, 0,
-                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
+                              msecs_to_jiffies(250));
                 clear_bit(HCI_INIT, &hdev->flags);
         }


@@ -818,11 +818,11 @@ skip_unblock:
 static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
 {
         unsigned long mask;
-        uint32_t limit, roundedF;
+        u64 limit, roundedF;
         int slot_shift = cl->grp->slot_shift;
 
         roundedF = qfq_round_down(cl->F, slot_shift);
-        limit = qfq_round_down(q->V, slot_shift) + (1UL << slot_shift);
+        limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);
 
         if (!qfq_gt(cl->F, q->V) || qfq_gt(roundedF, limit)) {
                 /* timestamp was stale */


@@ -381,7 +381,7 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
                                      void **value, size_t *len)
 {
         if (unlikely(IS_PRIVATE(inode)))
-                return 0;
+                return -EOPNOTSUPP;
         return security_ops->inode_init_security(inode, dir, qstr, name, value,
                                                  len);
 }