Merge branch 'irq/for-xen' into irq/core

irq/for-xen contains new functionality to avoid Xen private irq
hackery. That branch has a single irq commit and is pulled into the
Xen tree so their new features can be based on it.

Merge it into irq/core as other patches modify the same code.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Author: Thomas Gleixner
Date:   2011-02-08 16:38:00 +01:00
Commit: c9a443cdf7

92 changed files with 979 additions and 478 deletions


@ -296,6 +296,7 @@ Conexant 5066
=============
laptop Basic Laptop config (default)
hp-laptop HP laptops, e g G60
asus Asus K52JU, Lenovo G560
dell-laptop Dell laptops
dell-vostro Dell Vostro
olpc-xo-1_5 OLPC XO 1.5


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*


@ -838,7 +838,7 @@ EXPORT_SYMBOL(ep93xx_i2s_release);
static struct resource ep93xx_ac97_resources[] = {
{
.start = EP93XX_AAC_PHYS_BASE,
.end = EP93XX_AAC_PHYS_BASE + 0xb0 - 1,
.end = EP93XX_AAC_PHYS_BASE + 0xac - 1,
.flags = IORESOURCE_MEM,
},
{


@ -180,7 +180,7 @@ static const uint32_t mx25pdk_keymap[] = {
KEY(3, 3, KEY_POWER),
};
static const struct matrix_keymap_data mx25pdk_keymap_data __initdata = {
static const struct matrix_keymap_data mx25pdk_keymap_data __initconst = {
.keymap = mx25pdk_keymap,
.keymap_size = ARRAY_SIZE(mx25pdk_keymap),
};


@ -432,7 +432,7 @@ static struct clocksource clocksource_ixp4xx = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
unsigned long ixp4xx_timer_freq = FREQ;
unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
EXPORT_SYMBOL(ixp4xx_timer_freq);
static void __init ixp4xx_clocksource_init(void)
{
@ -496,7 +496,7 @@ static struct clock_event_device clockevent_ixp4xx = {
static void __init ixp4xx_clockevent_init(void)
{
clockevent_ixp4xx.mult = div_sc(FREQ, NSEC_PER_SEC,
clockevent_ixp4xx.mult = div_sc(IXP4XX_TIMER_FREQ, NSEC_PER_SEC,
clockevent_ixp4xx.shift);
clockevent_ixp4xx.max_delta_ns =
clockevent_delta2ns(0xfffffffe, &clockevent_ixp4xx);


@ -10,6 +10,7 @@
* 66.66... MHz. We do a convulted calculation of CLOCK_TICK_RATE b/c the
* timer register ignores the bottom 2 bits of the LATCH value.
*/
#define FREQ 66666000
#define CLOCK_TICK_RATE (((FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
#define IXP4XX_TIMER_FREQ 66666000
#define CLOCK_TICK_RATE \
(((IXP4XX_TIMER_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)


@ -265,6 +265,11 @@ void qmgr_release_queue(unsigned int queue)
qmgr_queue_descs[queue], queue);
qmgr_queue_descs[queue][0] = '\x0';
#endif
while ((addr = qmgr_get_entry(queue)))
printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
queue, addr);
__raw_writel(0, &qmgr_regs->sram[queue]);
used_sram_bitmap[0] &= ~mask[0];
@ -275,10 +280,6 @@ void qmgr_release_queue(unsigned int queue)
spin_unlock_irq(&qmgr_lock);
module_put(THIS_MODULE);
while ((addr = qmgr_get_entry(queue)))
printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
queue, addr);
}
static int qmgr_init(void)


@ -304,7 +304,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
if (reg | (1 << clk->enable_shift)) { \
if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
@ -347,7 +347,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
if (parent != clk->parent) { \
__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
HW_CLKCTRL_CLKSEQ_TOG); \
CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
clk->parent = parent; \
} \
\


@ -355,12 +355,12 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
} else { \
reg &= ~BM_CLKCTRL_##dr##_DIV; \
reg |= div << BP_CLKCTRL_##dr##_DIV; \
if (reg | (1 << clk->enable_shift)) { \
if (reg & (1 << clk->enable_shift)) { \
pr_err("%s: clock is gated\n", __func__); \
return -EINVAL; \
} \
} \
__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU); \
__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
\
for (i = 10000; i; i--) \
if (!(__raw_readl(CLKCTRL_BASE_ADDR + \
@ -483,7 +483,7 @@ static int name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
if (parent != clk->parent) { \
__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
HW_CLKCTRL_CLKSEQ_TOG); \
CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
clk->parent = parent; \
} \
\
@ -609,7 +609,6 @@ static struct clk_lookup lookups[] = {
_REGISTER_CLOCK("duart", NULL, uart_clk)
_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
_REGISTER_CLOCK("fec.0", NULL, fec_clk)
_REGISTER_CLOCK("rtc", NULL, rtc_clk)
_REGISTER_CLOCK("pll2", NULL, pll2_clk)
_REGISTER_CLOCK(NULL, "hclk", hbus_clk)


@ -57,7 +57,6 @@ static void __clk_disable(struct clk *clk)
if (clk->disable)
clk->disable(clk);
__clk_disable(clk->parent);
__clk_disable(clk->secondary);
}
}
@ -68,7 +67,6 @@ static int __clk_enable(struct clk *clk)
if (clk->usecount++ == 0) {
__clk_enable(clk->parent);
__clk_enable(clk->secondary);
if (clk->enable)
clk->enable(clk);


@ -139,6 +139,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
struct mxs_gpio_port *port = (struct mxs_gpio_port *)get_irq_data(irq);
u32 gpio_irq_no_base = port->virtual_irq_start;
desc->irq_data.chip->irq_ack(&desc->irq_data);
irq_stat = __raw_readl(port->base + PINCTRL_IRQSTAT(port->id)) &
__raw_readl(port->base + PINCTRL_IRQEN(port->id));


@ -29,8 +29,6 @@ struct clk {
int id;
/* Source clock this clk depends on */
struct clk *parent;
/* Secondary clock to enable/disable with this clock */
struct clk *secondary;
/* Reference count of clock enable/disable */
__s8 usecount;
/* Register bit position for clock's enable/disable control. */


@ -37,7 +37,7 @@ int omap_lcd_dma_running(void)
* On OMAP1510, internal LCD controller will start the transfer
* when it gets enabled, so assume DMA running if LCD enabled.
*/
if (cpu_is_omap1510())
if (cpu_is_omap15xx())
if (omap_readw(OMAP_LCDC_CONTROL) & OMAP_LCDC_CTRL_LCD_EN)
return 1;
@ -95,7 +95,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_single_transfer);
void omap_set_lcd_dma_b1_rotation(int rotate)
{
if (cpu_is_omap1510()) {
if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA rotation is not supported in 1510 mode\n");
BUG();
return;
@ -106,7 +106,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_rotation);
void omap_set_lcd_dma_b1_mirror(int mirror)
{
if (cpu_is_omap1510()) {
if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA mirror is not supported in 1510 mode\n");
BUG();
}
@ -116,7 +116,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_mirror);
void omap_set_lcd_dma_b1_vxres(unsigned long vxres)
{
if (cpu_is_omap1510()) {
if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA virtual resulotion is not supported "
"in 1510 mode\n");
BUG();
@ -127,7 +127,7 @@ EXPORT_SYMBOL(omap_set_lcd_dma_b1_vxres);
void omap_set_lcd_dma_b1_scale(unsigned int xscale, unsigned int yscale)
{
if (cpu_is_omap1510()) {
if (cpu_is_omap15xx()) {
printk(KERN_ERR "DMA scale is not supported in 1510 mode\n");
BUG();
}
@ -177,7 +177,7 @@ static void set_b1_regs(void)
bottom = PIXADDR(lcd_dma.xres - 1, lcd_dma.yres - 1);
/* 1510 DMA requires the bottom address to be 2 more
* than the actual last memory access location. */
if (cpu_is_omap1510() &&
if (cpu_is_omap15xx() &&
lcd_dma.data_type == OMAP_DMA_DATA_TYPE_S32)
bottom += 2;
ei = PIXSTEP(0, 0, 1, 0);
@ -241,7 +241,7 @@ static void set_b1_regs(void)
return; /* Suppress warning about uninitialized vars */
}
if (cpu_is_omap1510()) {
if (cpu_is_omap15xx()) {
omap_writew(top >> 16, OMAP1510_DMA_LCD_TOP_F1_U);
omap_writew(top, OMAP1510_DMA_LCD_TOP_F1_L);
omap_writew(bottom >> 16, OMAP1510_DMA_LCD_BOT_F1_U);
@ -343,7 +343,7 @@ void omap_free_lcd_dma(void)
BUG();
return;
}
if (!cpu_is_omap1510())
if (!cpu_is_omap15xx())
omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
OMAP1610_DMA_LCD_CCR);
lcd_dma.reserved = 0;
@ -360,7 +360,7 @@ void omap_enable_lcd_dma(void)
* connected. Otherwise the OMAP internal controller will
* start the transfer when it gets enabled.
*/
if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
return;
w = omap_readw(OMAP1610_DMA_LCD_CTRL);
@ -378,14 +378,14 @@ EXPORT_SYMBOL(omap_enable_lcd_dma);
void omap_setup_lcd_dma(void)
{
BUG_ON(lcd_dma.active);
if (!cpu_is_omap1510()) {
if (!cpu_is_omap15xx()) {
/* Set some reasonable defaults */
omap_writew(0x5440, OMAP1610_DMA_LCD_CCR);
omap_writew(0x9102, OMAP1610_DMA_LCD_CSDP);
omap_writew(0x0004, OMAP1610_DMA_LCD_LCH_CTRL);
}
set_b1_regs();
if (!cpu_is_omap1510()) {
if (!cpu_is_omap15xx()) {
u16 w;
w = omap_readw(OMAP1610_DMA_LCD_CCR);
@ -407,7 +407,7 @@ void omap_stop_lcd_dma(void)
u16 w;
lcd_dma.active = 0;
if (cpu_is_omap1510() || !lcd_dma.ext_ctrl)
if (cpu_is_omap15xx() || !lcd_dma.ext_ctrl)
return;
w = omap_readw(OMAP1610_DMA_LCD_CCR);


@ -44,7 +44,6 @@
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <asm/system.h>
#include <mach/hardware.h>


@ -115,9 +115,6 @@ static struct omap2_hsmmc_info mmc[] = {
static int devkit8000_panel_enable_lcd(struct omap_dss_device *dssdev)
{
twl_i2c_write_u8(TWL4030_MODULE_GPIO, 0x80, REG_GPIODATADIR1);
twl_i2c_write_u8(TWL4030_MODULE_LED, 0x0, 0x0);
if (gpio_is_valid(dssdev->reset_gpio))
gpio_set_value_cansleep(dssdev->reset_gpio, 1);
return 0;
@ -247,6 +244,8 @@ static struct gpio_led gpio_leds[];
static int devkit8000_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
int ret;
omap_mux_init_gpio(29, OMAP_PIN_INPUT);
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
mmc[0].gpio_cd = gpio + 0;
@ -255,17 +254,23 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
/* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
/* gpio + 1 is "LCD_PWREN" (out, active high) */
devkit8000_lcd_device.reset_gpio = gpio + 1;
gpio_request(devkit8000_lcd_device.reset_gpio, "LCD_PWREN");
/* Disable until needed */
gpio_direction_output(devkit8000_lcd_device.reset_gpio, 0);
/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0;
ret = gpio_request_one(devkit8000_lcd_device.reset_gpio,
GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN");
if (ret < 0) {
devkit8000_lcd_device.reset_gpio = -EINVAL;
printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n");
}
/* gpio + 7 is "DVI_PD" (out, active low) */
devkit8000_dvi_device.reset_gpio = gpio + 7;
gpio_request(devkit8000_dvi_device.reset_gpio, "DVI PowerDown");
/* Disable until needed */
gpio_direction_output(devkit8000_dvi_device.reset_gpio, 0);
ret = gpio_request_one(devkit8000_dvi_device.reset_gpio,
GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown");
if (ret < 0) {
devkit8000_dvi_device.reset_gpio = -EINVAL;
printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n");
}
return 0;
}


@ -409,8 +409,6 @@ static void __init omap4_panda_init(void)
platform_add_devices(panda_devices, ARRAY_SIZE(panda_devices));
omap_serial_init();
omap4_twl6030_hsmmc_init(mmc);
/* OMAP4 Panda uses internal transceiver so register nop transceiver */
usb_nop_xceiv_register();
omap4_ehci_init();
usb_musb_init(&musb_board_data);
}


@ -40,9 +40,6 @@ static struct regulator_consumer_supply rm680_vemmc_consumers[] = {
static struct regulator_init_data rm680_vemmc = {
.constraints = {
.name = "rm680_vemmc",
.min_uV = 2900000,
.max_uV = 2900000,
.apply_uV = 1,
.valid_modes_mask = REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.valid_ops_mask = REGULATOR_CHANGE_STATUS


@ -1000,6 +1000,7 @@ int __init omap_mux_init(const char *name, u32 flags,
if (!partition->base) {
pr_err("%s: Could not ioremap mux partition at 0x%08x\n",
__func__, partition->phys);
kfree(partition);
return -ENODEV;
}


@ -168,9 +168,10 @@ static void omap3_core_restore_context(void)
* once during boot sequence, but this works as we are not using secure
* services.
*/
static void omap3_save_secure_ram_context(u32 target_mpu_state)
static void omap3_save_secure_ram_context(void)
{
u32 ret;
int mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
/*
@ -181,7 +182,7 @@ static void omap3_save_secure_ram_context(u32 target_mpu_state)
pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_ON);
ret = _omap_save_secure_sram((u32 *)
__pa(omap3_secure_ram_storage));
pwrdm_set_next_pwrst(mpu_pwrdm, target_mpu_state);
pwrdm_set_next_pwrst(mpu_pwrdm, mpu_next_state);
/* Following is for error tracking, it should not happen */
if (ret) {
printk(KERN_ERR "save_secure_sram() returns %08x\n",
@ -1094,7 +1095,7 @@ static int __init omap3_pm_init(void)
local_fiq_disable();
omap_dma_global_context_save();
omap3_save_secure_ram_context(PWRDM_POWER_ON);
omap3_save_secure_ram_context();
omap_dma_global_context_restore();
local_irq_enable();


@ -780,8 +780,7 @@ static int omap_sr_autocomp_show(void *data, u64 *val)
struct omap_sr *sr_info = (struct omap_sr *) data;
if (!sr_info) {
pr_warning("%s: omap_sr struct for sr_%s not found\n",
__func__, sr_info->voltdm->name);
pr_warning("%s: omap_sr struct not found\n", __func__);
return -EINVAL;
}
@ -795,8 +794,7 @@ static int omap_sr_autocomp_store(void *data, u64 val)
struct omap_sr *sr_info = (struct omap_sr *) data;
if (!sr_info) {
pr_warning("%s: omap_sr struct for sr_%s not found\n",
__func__, sr_info->voltdm->name);
pr_warning("%s: omap_sr struct not found\n", __func__);
return -EINVAL;
}
@ -834,7 +832,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
if (!pdata) {
dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
return -EINVAL;
ret = -EINVAL;
goto err_free_devinfo;
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -966,7 +965,7 @@ static int __devexit omap_sr_remove(struct platform_device *pdev)
}
sr_info = _sr_lookup(pdata->voltdm);
if (!sr_info) {
if (IS_ERR(sr_info)) {
dev_warn(&pdev->dev, "%s: omap_sr struct not found\n",
__func__);
return -EINVAL;


@ -471,6 +471,7 @@ static void __init vdd_debugfs_init(struct omap_vdd_info *vdd)
strcat(name, vdd->voltdm.name);
vdd->debug_dir = debugfs_create_dir(name, voltage_dir);
kfree(name);
if (IS_ERR(vdd->debug_dir)) {
pr_warning("%s: Unable to create debugfs directory for"
" vdd_%s\n", __func__, vdd->voltdm.name);


@ -95,6 +95,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
case MACH_TYPE_MX35_3DS:
case MACH_TYPE_PCM043:
case MACH_TYPE_LILLY1131:
case MACH_TYPE_VPR200:
uart_base = MX3X_UART1_BASE_ADDR;
break;
case MACH_TYPE_MAGX_ZN5:
@ -102,6 +103,7 @@ static __inline__ void __arch_decomp_setup(unsigned long arch_id)
break;
case MACH_TYPE_MX51_BABBAGE:
case MACH_TYPE_EUKREA_CPUIMX51SD:
case MACH_TYPE_MX51_3DS:
uart_base = MX51_UART1_BASE_ADDR;
break;
case MACH_TYPE_MX50_RDP:


@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Sun Dec 12 23:24:27 2010
# Last update: Mon Feb 7 08:59:27 2011
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@ -2240,7 +2240,7 @@ arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
vs_v210 MACH_VS_V210 VS_V210 2252
vs_v212 MACH_VS_V212 VS_V212 2253
hmt MACH_HMT HMT 2254
suen3 MACH_SUEN3 SUEN3 2255
km_kirkwood MACH_KM_KIRKWOOD KM_KIRKWOOD 2255
vesper MACH_VESPER VESPER 2256
str9 MACH_STR9 STR9 2257
omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
@ -2987,7 +2987,7 @@ pxwnas_500_1000 MACH_PXWNAS_500_1000 PXWNAS_500_1000 3001
ea20 MACH_EA20 EA20 3002
awm2 MACH_AWM2 AWM2 3003
ti8148evm MACH_TI8148EVM TI8148EVM 3004
tegra_seaboard MACH_TEGRA_SEABOARD TEGRA_SEABOARD 3005
seaboard MACH_SEABOARD SEABOARD 3005
linkstation_chlv2 MACH_LINKSTATION_CHLV2 LINKSTATION_CHLV2 3006
tera_pro2_rack MACH_TERA_PRO2_RACK TERA_PRO2_RACK 3007
rubys MACH_RUBYS RUBYS 3008
@ -3190,7 +3190,7 @@ synergy MACH_SYNERGY SYNERGY 3205
ics_if_voip MACH_ICS_IF_VOIP ICS_IF_VOIP 3206
wlf_cragg_6410 MACH_WLF_CRAGG_6410 WLF_CRAGG_6410 3207
punica MACH_PUNICA PUNICA 3208
sbc_nt250 MACH_SBC_NT250 SBC_NT250 3209
trimslice MACH_TRIMSLICE TRIMSLICE 3209
mx27_wmultra MACH_MX27_WMULTRA MX27_WMULTRA 3210
mackerel MACH_MACKEREL MACKEREL 3211
fa9x27 MACH_FA9X27 FA9X27 3213
@ -3219,3 +3219,100 @@ pivicc MACH_PIVICC PIVICC 3235
pcm048 MACH_PCM048 PCM048 3236
dds MACH_DDS DDS 3237
chalten_xa1 MACH_CHALTEN_XA1 CHALTEN_XA1 3238
ts48xx MACH_TS48XX TS48XX 3239
tonga2_tfttimer MACH_TONGA2_TFTTIMER TONGA2_TFTTIMER 3240
whistler MACH_WHISTLER WHISTLER 3241
asl_phoenix MACH_ASL_PHOENIX ASL_PHOENIX 3242
at91sam9263otlite MACH_AT91SAM9263OTLITE AT91SAM9263OTLITE 3243
ddplug MACH_DDPLUG DDPLUG 3244
d2plug MACH_D2PLUG D2PLUG 3245
kzm9d MACH_KZM9D KZM9D 3246
verdi_lte MACH_VERDI_LTE VERDI_LTE 3247
nanozoom MACH_NANOZOOM NANOZOOM 3248
dm3730_som_lv MACH_DM3730_SOM_LV DM3730_SOM_LV 3249
dm3730_torpedo MACH_DM3730_TORPEDO DM3730_TORPEDO 3250
anchovy MACH_ANCHOVY ANCHOVY 3251
re2rev20 MACH_RE2REV20 RE2REV20 3253
re2rev21 MACH_RE2REV21 RE2REV21 3254
cns21xx MACH_CNS21XX CNS21XX 3255
rider MACH_RIDER RIDER 3257
nsk330 MACH_NSK330 NSK330 3258
cns2133evb MACH_CNS2133EVB CNS2133EVB 3259
z3_816x_mod MACH_Z3_816X_MOD Z3_816X_MOD 3260
z3_814x_mod MACH_Z3_814X_MOD Z3_814X_MOD 3261
beect MACH_BEECT BEECT 3262
dma_thunderbug MACH_DMA_THUNDERBUG DMA_THUNDERBUG 3263
omn_at91sam9g20 MACH_OMN_AT91SAM9G20 OMN_AT91SAM9G20 3264
mx25_e2s_uc MACH_MX25_E2S_UC MX25_E2S_UC 3265
mione MACH_MIONE MIONE 3266
top9000_tcu MACH_TOP9000_TCU TOP9000_TCU 3267
top9000_bsl MACH_TOP9000_BSL TOP9000_BSL 3268
kingdom MACH_KINGDOM KINGDOM 3269
armadillo460 MACH_ARMADILLO460 ARMADILLO460 3270
lq2 MACH_LQ2 LQ2 3271
sweda_tms2 MACH_SWEDA_TMS2 SWEDA_TMS2 3272
mx53_loco MACH_MX53_LOCO MX53_LOCO 3273
acer_a8 MACH_ACER_A8 ACER_A8 3275
acer_gauguin MACH_ACER_GAUGUIN ACER_GAUGUIN 3276
guppy MACH_GUPPY GUPPY 3277
mx61_ard MACH_MX61_ARD MX61_ARD 3278
tx53 MACH_TX53 TX53 3279
omapl138_case_a3 MACH_OMAPL138_CASE_A3 OMAPL138_CASE_A3 3280
uemd MACH_UEMD UEMD 3281
ccwmx51mut MACH_CCWMX51MUT CCWMX51MUT 3282
rockhopper MACH_ROCKHOPPER ROCKHOPPER 3283
nookcolor MACH_NOOKCOLOR NOOKCOLOR 3284
hkdkc100 MACH_HKDKC100 HKDKC100 3285
ts42xx MACH_TS42XX TS42XX 3286
aebl MACH_AEBL AEBL 3287
wario MACH_WARIO WARIO 3288
gfs_spm MACH_GFS_SPM GFS_SPM 3289
cm_t3730 MACH_CM_T3730 CM_T3730 3290
isc3 MACH_ISC3 ISC3 3291
rascal MACH_RASCAL RASCAL 3292
hrefv60 MACH_HREFV60 HREFV60 3293
tpt_2_0 MACH_TPT_2_0 TPT_2_0 3294
pyramid_td MACH_PYRAMID_TD PYRAMID_TD 3295
splendor MACH_SPLENDOR SPLENDOR 3296
guf_planet MACH_GUF_PLANET GUF_PLANET 3297
msm8x60_qt MACH_MSM8X60_QT MSM8X60_QT 3298
htc_hd_mini MACH_HTC_HD_MINI HTC_HD_MINI 3299
athene MACH_ATHENE ATHENE 3300
deep_r_ek_1 MACH_DEEP_R_EK_1 DEEP_R_EK_1 3301
vivow_ct MACH_VIVOW_CT VIVOW_CT 3302
nery_1000 MACH_NERY_1000 NERY_1000 3303
rfl109145_ssrv MACH_RFL109145_SSRV RFL109145_SSRV 3304
nmh MACH_NMH NMH 3305
wn802t MACH_WN802T WN802T 3306
dragonet MACH_DRAGONET DRAGONET 3307
geneva_b MACH_GENEVA_B GENEVA_B 3308
at91sam9263desk16l MACH_AT91SAM9263DESK16L AT91SAM9263DESK16L 3309
bcmhana_sv MACH_BCMHANA_SV BCMHANA_SV 3310
bcmhana_tablet MACH_BCMHANA_TABLET BCMHANA_TABLET 3311
koi MACH_KOI KOI 3312
ts4800 MACH_TS4800 TS4800 3313
tqma9263 MACH_TQMA9263 TQMA9263 3314
holiday MACH_HOLIDAY HOLIDAY 3315
dma_6410 MACH_DMA6410 DMA6410 3316
pcats_overlay MACH_PCATS_OVERLAY PCATS_OVERLAY 3317
hwgw6410 MACH_HWGW6410 HWGW6410 3318
shenzhou MACH_SHENZHOU SHENZHOU 3319
cwme9210 MACH_CWME9210 CWME9210 3320
cwme9210js MACH_CWME9210JS CWME9210JS 3321
pgs_v1 MACH_PGS_SITARA PGS_SITARA 3322
colibri_tegra2 MACH_COLIBRI_TEGRA2 COLIBRI_TEGRA2 3323
w21 MACH_W21 W21 3324
polysat1 MACH_POLYSAT1 POLYSAT1 3325
dataway MACH_DATAWAY DATAWAY 3326
cobral138 MACH_COBRAL138 COBRAL138 3327
roverpcs8 MACH_ROVERPCS8 ROVERPCS8 3328
marvelc MACH_MARVELC MARVELC 3329
navefihid MACH_NAVEFIHID NAVEFIHID 3330
dm365_cv100 MACH_DM365_CV100 DM365_CV100 3331
able MACH_ABLE ABLE 3332
legacy MACH_LEGACY LEGACY 3333
icong MACH_ICONG ICONG 3334
rover_g8 MACH_ROVER_G8 ROVER_G8 3335
t5388p MACH_T5388P T5388P 3336
dingo MACH_DINGO DINGO 3337
goflexhome MACH_GOFLEXHOME GOFLEXHOME 3338


@ -40,8 +40,8 @@
/* MAS registers bit definitions */
#define MAS0_TLBSEL(x) ((x << 28) & 0x30000000)
#define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000)
#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
#define MAS0_NV(x) ((x) & 0x00000FFF)
#define MAS0_HES 0x00004000
#define MAS0_WQ_ALLWAYS 0x00000000
@ -50,12 +50,12 @@
#define MAS1_VALID 0x80000000
#define MAS1_IPROT 0x40000000
#define MAS1_TID(x) ((x << 16) & 0x3FFF0000)
#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000)
#define MAS1_IND 0x00002000
#define MAS1_TS 0x00001000
#define MAS1_TSIZE_MASK 0x00000f80
#define MAS1_TSIZE_SHIFT 7
#define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
#define MAS2_EPN 0xFFFFF000
#define MAS2_X0 0x00000040


@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;
#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
#define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)


@ -18,7 +18,7 @@
#include <asm/mmu.h>
_GLOBAL(__setup_cpu_603)
mflr r4
mflr r5
BEGIN_MMU_FTR_SECTION
li r10,0
mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */
@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
bl __init_fpu_registers
END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
bl setup_common_caches
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_604)
mflr r4
mflr r5
bl setup_common_caches
bl setup_604_hid0
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_750)
mflr r4
mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_750cx)
mflr r4
mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750cx
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_750fx)
mflr r4
mflr r5
bl __init_fpu_registers
bl setup_common_caches
bl setup_750_7400_hid0
bl setup_750fx
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_7400)
mflr r4
mflr r5
bl __init_fpu_registers
bl setup_7400_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_7410)
mflr r4
mflr r5
bl __init_fpu_registers
bl setup_7410_workarounds
bl setup_common_caches
bl setup_750_7400_hid0
li r3,0
mtspr SPRN_L2CR2,r3
mtlr r4
mtlr r5
blr
_GLOBAL(__setup_cpu_745x)
mflr r4
mflr r5
bl setup_common_caches
bl setup_745x_specifics
mtlr r4
mtlr r5
blr
/* Enable caches for 603's, 604, 750 & 7400 */
@ -194,10 +194,10 @@ setup_750cx:
cror 4*cr0+eq,4*cr0+eq,4*cr1+eq
cror 4*cr0+eq,4*cr0+eq,4*cr2+eq
bnelr
lwz r6,CPU_SPEC_FEATURES(r5)
lwz r6,CPU_SPEC_FEATURES(r4)
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
stw r6,CPU_SPEC_FEATURES(r4)
blr
/* 750fx specific
@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
andis. r11,r11,L3CR_L3E@h
beq 1f
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
lwz r6,CPU_SPEC_FEATURES(r5)
lwz r6,CPU_SPEC_FEATURES(r4)
andi. r0,r6,CPU_FTR_L3_DISABLE_NAP
beq 1f
li r7,CPU_FTR_CAN_NAP
andc r6,r6,r7
stw r6,CPU_SPEC_FEATURES(r5)
stw r6,CPU_SPEC_FEATURES(r4)
1:
mfspr r11,SPRN_HID0


@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
* pointer on ppc64 and booke as we are running at 0 in real mode
* on ppc64 and reloc_offset is always 0 on booke.
*/
if (s->cpu_setup) {
s->cpu_setup(offset, s);
if (t->cpu_setup) {
t->cpu_setup(offset, t);
}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */
}


@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
dbg("removing cpu %lu from node %d\n", cpu, node);
if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
} else {
printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
cpu, node);
@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void)
}
#endif /* CONFIG_MEMORY_HOTPLUG */
/* Vrtual Processor Home Node (VPHN) support */
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
#define VPHN_NR_CHANGE_CTRS (8)
static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static void set_topology_timer(void);
@ -1303,16 +1302,18 @@ static void set_topology_timer(void);
*/
static void setup_cpu_associativity_change_counters(void)
{
int cpu = 0;
int cpu;
/* The VPHN feature supports a maximum of 8 reference points */
BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
for_each_possible_cpu(cpu) {
int i = 0;
int i;
u8 *counts = vphn_cpu_change_counts[cpu];
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
for (i = 0; i < distance_ref_points_depth; i++)
counts[i] = hypervisor_counts[i];
}
}
}
@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void)
*/
static int update_cpu_associativity_changes_mask(void)
{
int cpu = 0, nr_cpus = 0;
int cpu, nr_cpus = 0;
cpumask_t *changes = &cpu_associativity_changes_mask;
cpumask_clear(changes);
@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void)
u8 *counts = vphn_cpu_change_counts[cpu];
volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
if (hypervisor_counts[i] > counts[i]) {
for (i = 0; i < distance_ref_points_depth; i++) {
if (hypervisor_counts[i] != counts[i]) {
counts[i] = hypervisor_counts[i];
changed = 1;
}
@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
return nr_cpus;
}
/* 6 64-bit registers unpacked into 12 32-bit associativity values */
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
/*
* 6 64-bit registers unpacked into 12 32-bit associativity values. To form
* the complete property we have to add the length in the first cell.
*/
#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
/*
* Convert the associativity domain numbers returned from the hypervisor
@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void)
*/
static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
{
int i = 0;
int nr_assoc_doms = 0;
int i, nr_assoc_doms = 0;
const u16 *field = (const u16*) packed;
#define VPHN_FIELD_UNUSED (0xffff)
#define VPHN_FIELD_MSB (0x8000)
#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
if (*field == VPHN_FIELD_UNUSED) {
/* All significant fields processed, and remaining
* fields contain the reserved value of all 1's.
@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
*/
unpacked[i] = *((u32*)field);
field += 2;
}
else if (*field & VPHN_FIELD_MSB) {
} else if (*field & VPHN_FIELD_MSB) {
/* Data is in the lower 15 bits of this field */
unpacked[i] = *field & VPHN_FIELD_MASK;
field++;
nr_assoc_doms++;
}
else {
} else {
/* Data is in the lower 15 bits of this field
* concatenated with the next 16 bit field
*/
@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
}
}
/* The first cell contains the length of the property */
unpacked[0] = nr_assoc_doms;
return nr_assoc_doms;
}
@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
*/
static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
{
long rc = 0;
long rc;
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
u64 flags = 1;
int hwcpu = get_hard_smp_processor_id(cpu);
@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
static long vphn_get_associativity(unsigned long cpu,
unsigned int *associativity)
{
long rc = 0;
long rc;
rc = hcall_vphn(cpu, associativity);
@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu,
*/
int arch_update_cpu_topology(void)
{
int cpu = 0, nid = 0, old_nid = 0;
int cpu, nid, old_nid;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct sys_device *sysdev = NULL;
struct sys_device *sysdev;
for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
@ -1512,7 +1516,8 @@ int start_topology_update(void)
{
int rc = 0;
if (firmware_has_feature(FW_FEATURE_VPHN)) {
if (firmware_has_feature(FW_FEATURE_VPHN) &&
get_lppaca()->shared_proc) {
vphn_enabled = 1;
setup_cpu_associativity_change_counters();
init_timer_deferrable(&topology_timer);


@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;
/*
* Since the tracing code might execute hcalls we need to guard against
* recursion. One example of this are spinlocks calling H_YIELD on
* shared processor partitions.
*/
static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
void hcall_tracepoint_regfunc(void)
{
hcall_tracepoint_refcount++;
@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
unsigned long flags;
unsigned int *depth;
local_irq_save(flags);
depth = &__get_cpu_var(hcall_trace_depth);
if (*depth)
goto out;
(*depth)++;
trace_hcall_entry(opcode, args);
(*depth)--;
out:
local_irq_restore(flags);
}
void __trace_hcall_exit(long opcode, unsigned long retval,
unsigned long *retbuf)
{
unsigned long flags;
unsigned int *depth;
local_irq_save(flags);
depth = &__get_cpu_var(hcall_trace_depth);
if (*depth)
goto out;
(*depth)++;
trace_hcall_exit(opcode, retval, retbuf);
(*depth)--;
out:
local_irq_restore(flags);
}
#endif


@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
/* Static state in head.S used to set up a CPU */
extern struct {
void *sp;
unsigned short ss;
} stack_start;
extern unsigned long stack_start; /* Initial stack pointer address */
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);


@ -12,10 +12,8 @@
#include <linux/cpumask.h>
#include <asm/segment.h>
#include <asm/desc.h>
#ifdef CONFIG_X86_32
#include <asm/pgtable.h>
#endif
#include <asm/cacheflush.h>
#include "realmode/wakeup.h"
#include "sleep.h"
@ -100,7 +98,7 @@ int acpi_save_state_mem(void)
#else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
stack_start.sp = temp_stack + sizeof(temp_stack);
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());
@ -149,6 +147,15 @@ void __init acpi_reserve_wakeup_memory(void)
memblock_x86_reserve_range(mem, mem + WAKEUP_SIZE, "ACPI WAKEUP");
}
int __init acpi_configure_wakeup_memory(void)
{
if (acpi_realmode)
set_memory_x(acpi_realmode, WAKEUP_SIZE >> PAGE_SHIFT);
return 0;
}
arch_initcall(acpi_configure_wakeup_memory);
static int __init acpi_sleep_setup(char *str)
{


@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
}
/*
* MTRR initialization for all AP's
* Delayed MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;
/*
* Check if someone has requested the delay of AP MTRR initialization,
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
if (!mtrr_aps_delayed_init)
return;
set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}


@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
*/
__HEAD
ENTRY(startup_32)
movl pa(stack_start),%ecx
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
testb $(1<<6), BP_loadflags(%esi)
@ -99,7 +101,9 @@ ENTRY(startup_32)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl %eax,%ss
2:
leal -__PAGE_OFFSET(%ecx),%esp
/*
* Clear BSS first so that there are no surprises...
@ -145,8 +149,6 @@ ENTRY(startup_32)
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end.
*
* Note that the stack is not yet set up!
*/
#ifdef CONFIG_X86_PAE
@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:
@ -347,8 +352,8 @@ default_entry:
movl %eax,%cr0 /* ..and set paging (PG) bit */
ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
1:
/* Set up the stack pointer */
lss stack_start,%esp
/* Shift the stack pointer to a virtual address */
addl $__PAGE_OFFSET, %esp
/*
* Initialize eflags. Some BIOS's leave bits like NT set. This would
@ -360,9 +365,7 @@ default_entry:
#ifdef CONFIG_SMP
cmpb $0, ready
jz 1f /* Initial CPU cleans BSS */
jmp checkCPUtype
1:
jnz checkCPUtype
#endif /* CONFIG_SMP */
/*
@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
cmpb $0,%cl # the first CPU calls start_kernel
je 1f
movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
jmp *(initial_code)
/*
@ -670,15 +666,15 @@ ENTRY(initial_page_table)
#endif
.data
.balign 4
ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE
.long __BOOT_DS
ready: .byte 0
early_recursion_flag:
.long 0
ready: .byte 0
int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n"


@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
* target processor state.
*/
startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
(unsigned long)stack_start.sp);
stack_start);
/*
* Run STARTUP IPI loop.
@ -785,7 +785,7 @@ do_rest:
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start.sp = (void *) c_idle.idle->thread.sp;
stack_start = c_idle.idle->thread.sp;
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();


@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
unsigned long pfn)
{
pgprot_t forbidden = __pgprot(0);
pgprot_t required = __pgprot(0);
/*
* The BIOS area between 640k and 1Mb needs to be executable for
@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
/*
* .data and .bss should always be writable.
*/
if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
pgprot_val(required) |= _PAGE_RW;
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
return prot;
}


@ -69,11 +69,7 @@ static int ixp4xx_spkr_event(struct input_dev *dev, unsigned int type, unsigned
}
if (value > 20 && value < 32767)
#ifndef FREQ
count = (ixp4xx_get_board_tick_rate() / (value * 4)) - 1;
#else
count = (FREQ / (value * 4)) - 1;
#endif
count = (IXP4XX_TIMER_FREQ / (value * 4)) - 1;
ixp4xx_spkr_control(pin, count);


@ -143,6 +143,7 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev,
rtc->id = id;
rtc->ops = ops;
rtc->owner = owner;
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
rtc->dev.parent = dev;
rtc->dev.class = rtc_class;


@ -464,6 +464,9 @@ int rtc_irq_set_freq(struct rtc_device *rtc, struct rtc_task *task, int freq)
int err = 0;
unsigned long flags;
if (freq <= 0)
return -EINVAL;
spin_lock_irqsave(&rtc->irq_task_lock, flags);
if (rtc->irq_task != NULL && task == NULL)
err = -EBUSY;


@ -37,6 +37,9 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
char *value = NULL;
struct posix_acl *acl;
if (!IS_POSIXACL(inode))
return NULL;
acl = get_cached_acl(inode, type);
if (acl != ACL_NOT_CACHED)
return acl;
@ -84,6 +87,9 @@ static int btrfs_xattr_acl_get(struct dentry *dentry, const char *name,
struct posix_acl *acl;
int ret = 0;
if (!IS_POSIXACL(dentry->d_inode))
return -EOPNOTSUPP;
acl = btrfs_get_acl(dentry->d_inode, type);
if (IS_ERR(acl))


@ -562,7 +562,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
u64 em_len;
u64 em_start;
struct extent_map *em;
int ret;
int ret = -ENOMEM;
u32 *sums;
tree = &BTRFS_I(inode)->io_tree;
@ -577,6 +577,9 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
compressed_len = em->block_len;
cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
if (!cb)
goto out;
atomic_set(&cb->pending_bios, 0);
cb->errors = 0;
cb->inode = inode;
@ -597,13 +600,18 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
nr_pages = (compressed_len + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE;
cb->compressed_pages = kmalloc(sizeof(struct page *) * nr_pages,
cb->compressed_pages = kzalloc(sizeof(struct page *) * nr_pages,
GFP_NOFS);
if (!cb->compressed_pages)
goto fail1;
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
for (page_index = 0; page_index < nr_pages; page_index++) {
cb->compressed_pages[page_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
if (!cb->compressed_pages[page_index])
goto fail2;
}
cb->nr_pages = nr_pages;
@ -614,6 +622,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
cb->len = uncompressed_len;
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
if (!comp_bio)
goto fail2;
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
atomic_inc(&cb->pending_bios);
@ -681,6 +691,17 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_put(comp_bio);
return 0;
fail2:
for (page_index = 0; page_index < nr_pages; page_index++)
free_page((unsigned long)cb->compressed_pages[page_index]);
kfree(cb->compressed_pages);
fail1:
kfree(cb);
out:
free_extent_map(em);
return ret;
}
static struct list_head comp_idle_workspace[BTRFS_COMPRESS_TYPES];
@ -900,7 +921,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
return ret;
}
void __exit btrfs_exit_compress(void)
void btrfs_exit_compress(void)
{
free_workspaces();
}


@ -1550,6 +1550,7 @@ static int transaction_kthread(void *arg)
spin_unlock(&root->fs_info->new_trans_lock);
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
if (transid == trans->transid) {
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
@ -2453,10 +2454,14 @@ int btrfs_commit_super(struct btrfs_root *root)
up_write(&root->fs_info->cleanup_work_sem);
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
/* run commit again to drop the original snapshot */
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_commit_transaction(trans, root);
ret = btrfs_write_and_wait_transaction(NULL, root);
BUG_ON(ret);
@ -2554,6 +2559,8 @@ int close_ctree(struct btrfs_root *root)
kfree(fs_info->chunk_root);
kfree(fs_info->dev_root);
kfree(fs_info->csum_root);
kfree(fs_info);
return 0;
}


@ -171,6 +171,8 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
int ret;
path = btrfs_alloc_path();
if (!path)
return ERR_PTR(-ENOMEM);
if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
key.objectid = root->root_key.objectid;


@ -320,11 +320,6 @@ static int caching_kthread(void *data)
if (!path)
return -ENOMEM;
exclude_super_stripes(extent_root, block_group);
spin_lock(&block_group->space_info->lock);
block_group->space_info->bytes_readonly += block_group->bytes_super;
spin_unlock(&block_group->space_info->lock);
last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
/*
@ -467,8 +462,10 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
cache->cached = BTRFS_CACHE_NO;
}
spin_unlock(&cache->lock);
if (ret == 1)
if (ret == 1) {
free_excluded_extents(fs_info->extent_root, cache);
return 0;
}
}
if (load_cache_only)
@ -3344,8 +3341,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
u64 reserved;
u64 max_reclaim;
u64 reclaimed = 0;
long time_left;
int pause = 1;
int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
int loops = 0;
block_rsv = &root->fs_info->delalloc_block_rsv;
space_info = block_rsv->space_info;
@ -3358,7 +3357,7 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
max_reclaim = min(reserved, to_reclaim);
while (1) {
while (loops < 1024) {
/* have the flusher threads jump in and do some IO */
smp_mb();
nr_pages = min_t(unsigned long, nr_pages,
@ -3366,8 +3365,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
spin_lock(&space_info->lock);
if (reserved > space_info->bytes_reserved)
if (reserved > space_info->bytes_reserved) {
loops = 0;
reclaimed += reserved - space_info->bytes_reserved;
} else {
loops++;
}
reserved = space_info->bytes_reserved;
spin_unlock(&space_info->lock);
@ -3378,7 +3381,12 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
return -EAGAIN;
__set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(pause);
time_left = schedule_timeout(pause);
/* We were interrupted, exit */
if (time_left)
break;
pause <<= 1;
if (pause > HZ / 10)
pause = HZ / 10;
@ -3588,8 +3596,20 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
if (num_bytes > 0) {
if (dest) {
block_rsv_add_bytes(dest, num_bytes, 0);
} else {
spin_lock(&dest->lock);
if (!dest->full) {
u64 bytes_to_add;
bytes_to_add = dest->size - dest->reserved;
bytes_to_add = min(num_bytes, bytes_to_add);
dest->reserved += bytes_to_add;
if (dest->reserved >= dest->size)
dest->full = 1;
num_bytes -= bytes_to_add;
}
spin_unlock(&dest->lock);
}
if (num_bytes) {
spin_lock(&space_info->lock);
space_info->bytes_reserved -= num_bytes;
spin_unlock(&space_info->lock);
@ -4012,6 +4032,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
num_bytes = ALIGN(num_bytes, root->sectorsize);
atomic_dec(&BTRFS_I(inode)->outstanding_extents);
WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
spin_lock(&BTRFS_I(inode)->accounting_lock);
nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
@ -5633,6 +5654,7 @@ use_block_rsv(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u32 blocksize)
{
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
int ret;
block_rsv = get_block_rsv(trans, root);
@ -5640,14 +5662,39 @@ use_block_rsv(struct btrfs_trans_handle *trans,
if (block_rsv->size == 0) {
ret = reserve_metadata_bytes(trans, root, block_rsv,
blocksize, 0);
if (ret)
/*
* If we couldn't reserve metadata bytes try and use some from
* the global reserve.
*/
if (ret && block_rsv != global_rsv) {
ret = block_rsv_use_bytes(global_rsv, blocksize);
if (!ret)
return global_rsv;
return ERR_PTR(ret);
} else if (ret) {
return ERR_PTR(ret);
}
return block_rsv;
}
ret = block_rsv_use_bytes(block_rsv, blocksize);
if (!ret)
return block_rsv;
if (ret) {
WARN_ON(1);
ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
0);
if (!ret) {
spin_lock(&block_rsv->lock);
block_rsv->size += blocksize;
spin_unlock(&block_rsv->lock);
return block_rsv;
} else if (ret && block_rsv != global_rsv) {
ret = block_rsv_use_bytes(global_rsv, blocksize);
if (!ret)
return global_rsv;
}
}
return ERR_PTR(-ENOSPC);
}
@ -6221,6 +6268,8 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
BUG_ON(!wc);
trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans));
if (block_rsv)
trans->block_rsv = block_rsv;
@ -6318,6 +6367,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
btrfs_end_transaction_throttle(trans, tree_root);
trans = btrfs_start_transaction(tree_root, 0);
BUG_ON(IS_ERR(trans));
if (block_rsv)
trans->block_rsv = block_rsv;
}
@ -6446,6 +6496,8 @@ static noinline int relocate_inode_pages(struct inode *inode, u64 start,
int ret = 0;
ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
mutex_lock(&inode->i_mutex);
first_index = start >> PAGE_CACHE_SHIFT;
@ -7477,7 +7529,7 @@ int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
BUG_ON(reloc_root->commit_root != NULL);
while (1) {
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
mutex_lock(&root->fs_info->drop_mutex);
ret = btrfs_drop_snapshot(trans, reloc_root);
@ -7535,7 +7587,7 @@ int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
if (found) {
trans = btrfs_start_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
ret = btrfs_commit_transaction(trans, root);
BUG_ON(ret);
}
@ -7779,7 +7831,7 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
trans = btrfs_start_transaction(extent_root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
if (extent_key->objectid == 0) {
ret = del_extent_zero(trans, extent_root, path, extent_key);
@ -8270,6 +8322,13 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
if (block_group->cached == BTRFS_CACHE_STARTED)
wait_block_group_cache_done(block_group);
/*
* We haven't cached this block group, which means we could
* possibly have excluded extents on this block group.
*/
if (block_group->cached == BTRFS_CACHE_NO)
free_excluded_extents(info->extent_root, block_group);
btrfs_remove_free_space_cache(block_group);
btrfs_put_block_group(block_group);
@ -8384,6 +8443,13 @@ int btrfs_read_block_groups(struct btrfs_root *root)
cache->flags = btrfs_block_group_flags(&cache->item);
cache->sectorsize = root->sectorsize;
/*
* We need to exclude the super stripes now so that the space
* info has super bytes accounted for, otherwise we'll think
* we have more space than we actually do.
*/
exclude_super_stripes(root, cache);
/*
* check for two cases, either we are full, and therefore
* don't need to bother with the caching work since we won't
@ -8392,12 +8458,10 @@ int btrfs_read_block_groups(struct btrfs_root *root)
* time, particularly in the full case.
*/
if (found_key.offset == btrfs_block_group_used(&cache->item)) {
exclude_super_stripes(root, cache);
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
free_excluded_extents(root, cache);
} else if (btrfs_block_group_used(&cache->item) == 0) {
exclude_super_stripes(root, cache);
cache->last_byte_to_unpin = (u64)-1;
cache->cached = BTRFS_CACHE_FINISHED;
add_new_free_space(cache, root->fs_info,


@ -1865,7 +1865,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
bio_get(bio);
if (tree->ops && tree->ops->submit_bio_hook)
tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
mirror_num, bio_flags, start);
else
submit_bio(rw, bio);
@ -1920,6 +1920,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
nr = bio_get_nr_vecs(bdev);
bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
if (!bio)
return -ENOMEM;
bio_add_page(bio, page, page_size, offset);
bio->bi_end_io = end_io_func;
@ -2126,7 +2128,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
ret = __extent_read_full_page(tree, page, get_extent, &bio, 0,
&bio_flags);
if (bio)
submit_one_bio(READ, bio, 0, bio_flags);
ret = submit_one_bio(READ, bio, 0, bio_flags);
return ret;
}


@ -536,6 +536,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
root = root->fs_info->csum_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while (1) {
key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
@ -548,7 +550,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
if (path->slots[0] == 0)
goto out;
path->slots[0]--;
} else if (ret < 0) {
goto out;
}
leaf = path->nodes[0];
btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);


@ -793,8 +793,12 @@ again:
for (i = 0; i < num_pages; i++) {
pages[i] = grab_cache_page(inode->i_mapping, index + i);
if (!pages[i]) {
err = -ENOMEM;
BUG_ON(1);
int c;
for (c = i - 1; c >= 0; c--) {
unlock_page(pages[c]);
page_cache_release(pages[c]);
}
return -ENOMEM;
}
wait_on_page_writeback(pages[i]);
}
@ -946,6 +950,10 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages) {
ret = -ENOMEM;
goto out;
}
/* generic_write_checks can change our pos */
start_pos = pos;
@ -984,8 +992,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
size_t write_bytes = min(iov_iter_count(&i),
nrptrs * (size_t)PAGE_CACHE_SIZE -
offset);
size_t num_pages = (write_bytes + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
size_t num_pages = (write_bytes + offset +
PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
WARN_ON(num_pages > nrptrs);
memset(pages, 0, sizeof(struct page *) * nrptrs);
@ -1015,8 +1023,8 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
copied = btrfs_copy_from_user(pos, num_pages,
write_bytes, pages, &i);
dirty_pages = (copied + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >>
PAGE_CACHE_SHIFT;
if (num_pages > dirty_pages) {
if (copied > 0)


@ -987,11 +987,18 @@ tree_search_offset(struct btrfs_block_group_cache *block_group,
return entry;
}
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info)
static inline void
__unlink_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info)
{
rb_erase(&info->offset_index, &block_group->free_space_offset);
block_group->free_extents--;
}
static void unlink_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info)
{
__unlink_free_space(block_group, info);
block_group->free_space -= info->bytes;
}
@ -1016,14 +1023,18 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group)
u64 max_bytes;
u64 bitmap_bytes;
u64 extent_bytes;
u64 size = block_group->key.offset;
/*
* The goal is to keep the total amount of memory used per 1gb of space
* at or below 32k, so we need to adjust how much memory we allow to be
* used by extent based free space tracking
*/
max_bytes = MAX_CACHE_BYTES_PER_GIG *
(div64_u64(block_group->key.offset, 1024 * 1024 * 1024));
if (size < 1024 * 1024 * 1024)
max_bytes = MAX_CACHE_BYTES_PER_GIG;
else
max_bytes = MAX_CACHE_BYTES_PER_GIG *
div64_u64(size, 1024 * 1024 * 1024);
/*
* we want to account for 1 more bitmap than what we have so we can make
@ -1171,6 +1182,16 @@ static void add_new_bitmap(struct btrfs_block_group_cache *block_group,
recalculate_thresholds(block_group);
}
static void free_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *bitmap_info)
{
unlink_free_space(block_group, bitmap_info);
kfree(bitmap_info->bitmap);
kfree(bitmap_info);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *bitmap_info,
u64 *offset, u64 *bytes)
@ -1195,6 +1216,7 @@ again:
*/
search_start = *offset;
search_bytes = *bytes;
search_bytes = min(search_bytes, end - search_start + 1);
ret = search_bitmap(block_group, bitmap_info, &search_start,
&search_bytes);
BUG_ON(ret < 0 || search_start != *offset);
@ -1211,13 +1233,8 @@ again:
if (*bytes) {
struct rb_node *next = rb_next(&bitmap_info->offset_index);
if (!bitmap_info->bytes) {
unlink_free_space(block_group, bitmap_info);
kfree(bitmap_info->bitmap);
kfree(bitmap_info);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
if (!bitmap_info->bytes)
free_bitmap(block_group, bitmap_info);
/*
* no entry after this bitmap, but we still have bytes to
@ -1250,13 +1267,8 @@ again:
return -EAGAIN;
goto again;
} else if (!bitmap_info->bytes) {
unlink_free_space(block_group, bitmap_info);
kfree(bitmap_info->bitmap);
kfree(bitmap_info);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
} else if (!bitmap_info->bytes)
free_bitmap(block_group, bitmap_info);
return 0;
}
@ -1359,22 +1371,14 @@ out:
return ret;
}
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
bool try_merge_free_space(struct btrfs_block_group_cache *block_group,
struct btrfs_free_space *info, bool update_stat)
{
struct btrfs_free_space *right_info = NULL;
struct btrfs_free_space *left_info = NULL;
struct btrfs_free_space *info = NULL;
int ret = 0;
info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
if (!info)
return -ENOMEM;
info->offset = offset;
info->bytes = bytes;
spin_lock(&block_group->tree_lock);
struct btrfs_free_space *left_info;
struct btrfs_free_space *right_info;
bool merged = false;
u64 offset = info->offset;
u64 bytes = info->bytes;
/*
* first we want to see if there is free space adjacent to the range we
@ -1388,37 +1392,62 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
else
left_info = tree_search_offset(block_group, offset - 1, 0, 0);
/*
* If there was no extent directly to the left or right of this new
* extent then we know we're going to have to allocate a new extent, so
* before we do that see if we need to drop this into a bitmap
*/
if ((!left_info || left_info->bitmap) &&
(!right_info || right_info->bitmap)) {
ret = insert_into_bitmap(block_group, info);
if (ret < 0) {
goto out;
} else if (ret) {
ret = 0;
goto out;
}
}
if (right_info && !right_info->bitmap) {
unlink_free_space(block_group, right_info);
if (update_stat)
unlink_free_space(block_group, right_info);
else
__unlink_free_space(block_group, right_info);
info->bytes += right_info->bytes;
kfree(right_info);
merged = true;
}
if (left_info && !left_info->bitmap &&
left_info->offset + left_info->bytes == offset) {
unlink_free_space(block_group, left_info);
if (update_stat)
unlink_free_space(block_group, left_info);
else
__unlink_free_space(block_group, left_info);
info->offset = left_info->offset;
info->bytes += left_info->bytes;
kfree(left_info);
merged = true;
}
return merged;
}
int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
u64 offset, u64 bytes)
{
struct btrfs_free_space *info;
int ret = 0;
info = kzalloc(sizeof(struct btrfs_free_space), GFP_NOFS);
if (!info)
return -ENOMEM;
info->offset = offset;
info->bytes = bytes;
spin_lock(&block_group->tree_lock);
if (try_merge_free_space(block_group, info, true))
goto link;
/*
* There was no extent directly to the left or right of this new
* extent then we know we're going to have to allocate a new extent, so
* before we do that see if we need to drop this into a bitmap
*/
ret = insert_into_bitmap(block_group, info);
if (ret < 0) {
goto out;
} else if (ret) {
ret = 0;
goto out;
}
link:
ret = link_free_space(block_group, info);
if (ret)
kfree(info);
@ -1621,6 +1650,7 @@ __btrfs_return_cluster_to_free_space(
node = rb_next(&entry->offset_index);
rb_erase(&entry->offset_index, &cluster->root);
BUG_ON(entry->bitmap);
try_merge_free_space(block_group, entry, false);
tree_insert_offset(&block_group->free_space_offset,
entry->offset, &entry->offset_index, 0);
}
@ -1685,13 +1715,8 @@ u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
ret = offset;
if (entry->bitmap) {
bitmap_clear_bits(block_group, entry, offset, bytes);
if (!entry->bytes) {
unlink_free_space(block_group, entry);
kfree(entry->bitmap);
kfree(entry);
block_group->total_bitmaps--;
recalculate_thresholds(block_group);
}
if (!entry->bytes)
free_bitmap(block_group, entry);
} else {
unlink_free_space(block_group, entry);
entry->offset += bytes;
@ -1789,6 +1814,8 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
ret = search_start;
bitmap_clear_bits(block_group, entry, ret, bytes);
if (entry->bytes == 0)
free_bitmap(block_group, entry);
out:
spin_unlock(&cluster->lock);
spin_unlock(&block_group->tree_lock);
@ -1842,15 +1869,26 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
entry->offset += bytes;
entry->bytes -= bytes;
if (entry->bytes == 0) {
if (entry->bytes == 0)
rb_erase(&entry->offset_index, &cluster->root);
kfree(entry);
}
break;
}
out:
spin_unlock(&cluster->lock);
if (!ret)
return 0;
spin_lock(&block_group->tree_lock);
block_group->free_space -= bytes;
if (entry->bytes == 0) {
block_group->free_extents--;
kfree(entry);
}
spin_unlock(&block_group->tree_lock);
return ret;
}

View File

@ -416,7 +416,7 @@ again:
}
if (start == 0) {
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@ -612,6 +612,7 @@ retry:
GFP_NOFS);
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
ret = btrfs_reserve_extent(trans, root,
async_extent->compressed_size,
async_extent->compressed_size,
@ -771,7 +772,7 @@ static noinline int cow_file_range(struct inode *inode,
BUG_ON(root == root->fs_info->tree_root);
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@ -1049,7 +1050,7 @@ static noinline int run_delalloc_nocow(struct inode *inode,
} else {
trans = btrfs_join_transaction(root, 1);
}
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
cow_start = (u64)-1;
cur_offset = start;
@ -1557,6 +1558,7 @@ out:
out_page:
unlock_page(page);
page_cache_release(page);
kfree(fixup);
}
/*
@ -1703,7 +1705,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
trans = btrfs_join_transaction_nolock(root, 1);
else
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
ret = btrfs_update_inode(trans, root, inode);
@ -1720,6 +1722,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
trans = btrfs_join_transaction_nolock(root, 1);
else
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@ -2354,6 +2357,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
*/
if (is_bad_inode(inode)) {
trans = btrfs_start_transaction(root, 0);
BUG_ON(IS_ERR(trans));
btrfs_orphan_del(trans, inode);
btrfs_end_transaction(trans, root);
iput(inode);
@ -2381,6 +2385,7 @@ void btrfs_orphan_cleanup(struct btrfs_root *root)
if (root->orphan_block_rsv || root->orphan_item_inserted) {
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
btrfs_end_transaction(trans, root);
}
@ -2641,7 +2646,7 @@ int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
goto err;
goto out;
}
path->leave_spinning = 1;
@ -2714,9 +2719,10 @@ static int check_path_shared(struct btrfs_root *root,
struct extent_buffer *eb;
int level;
u64 refs = 1;
int uninitialized_var(ret);
for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
int ret;
if (!path->nodes[level])
break;
eb = path->nodes[level];
@ -2727,7 +2733,7 @@ static int check_path_shared(struct btrfs_root *root,
if (refs > 1)
return 1;
}
return ret; /* XXX callers? */
return 0;
}
/*
@ -4134,7 +4140,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
}
srcu_read_unlock(&root->fs_info->subvol_srcu, index);
if (root != sub_root) {
if (!IS_ERR(inode) && root != sub_root) {
down_read(&root->fs_info->cleanup_work_sem);
if (!(inode->i_sb->s_flags & MS_RDONLY))
btrfs_orphan_cleanup(sub_root);
@ -4347,6 +4353,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
trans = btrfs_join_transaction_nolock(root, 1);
else
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);
btrfs_set_trans_block_group(trans, inode);
if (nolock)
ret = btrfs_end_transaction_nolock(trans, root);
@ -4372,6 +4380,7 @@ void btrfs_dirty_inode(struct inode *inode)
return;
trans = btrfs_join_transaction(root, 1);
BUG_ON(IS_ERR(trans));
btrfs_set_trans_block_group(trans, inode);
ret = btrfs_update_inode(trans, root, inode);
@ -5176,6 +5185,8 @@ again:
em = NULL;
btrfs_release_path(root, path);
trans = btrfs_join_transaction(root, 1);
if (IS_ERR(trans))
return ERR_CAST(trans);
goto again;
}
map = kmap(page);
@ -5280,8 +5291,8 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
trans = btrfs_join_transaction(root, 0);
if (!trans)
return ERR_PTR(-ENOMEM);
if (IS_ERR(trans))
return ERR_CAST(trans);
trans->block_rsv = &root->fs_info->delalloc_block_rsv;
@ -5505,7 +5516,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
* while we look for nocow cross refs
*/
trans = btrfs_join_transaction(root, 0);
if (!trans)
if (IS_ERR(trans))
goto must_cow;
if (can_nocow_odirect(trans, inode, start, len) == 1) {
@ -5640,7 +5651,7 @@ again:
BUG_ON(!ordered);
trans = btrfs_join_transaction(root, 1);
if (!trans) {
if (IS_ERR(trans)) {
err = -ENOMEM;
goto out;
}

View File

@ -203,7 +203,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
trans = btrfs_join_transaction(root, 1);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
@ -907,6 +907,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
if (new_size > old_size) {
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto out_unlock;
}
ret = btrfs_grow_device(trans, device, new_size);
btrfs_commit_transaction(trans, root);
} else {
@ -1898,7 +1902,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
memcpy(&new_key, &key, sizeof(new_key));
new_key.objectid = inode->i_ino;
new_key.offset = key.offset + destoff - off;
if (off <= key.offset)
new_key.offset = key.offset + destoff - off;
else
new_key.offset = destoff;
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
@ -2082,7 +2089,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
ret = -ENOMEM;
trans = btrfs_start_ioctl_transaction(root, 0);
if (!trans)
if (IS_ERR(trans))
goto out_drop;
file->private_data = trans;
@ -2138,9 +2145,9 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
path->leave_spinning = 1;
trans = btrfs_start_transaction(root, 1);
if (!trans) {
if (IS_ERR(trans)) {
btrfs_free_path(path);
return -ENOMEM;
return PTR_ERR(trans);
}
dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
@ -2334,6 +2341,8 @@ static noinline long btrfs_ioctl_start_sync(struct file *file, void __user *argp
u64 transid;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
transid = trans->transid;
btrfs_commit_transaction_async(trans, root, 0);

View File

@ -141,7 +141,7 @@ static inline struct rb_node *tree_search(struct btrfs_ordered_inode_tree *tree,
u64 file_offset)
{
struct rb_root *root = &tree->tree;
struct rb_node *prev;
struct rb_node *prev = NULL;
struct rb_node *ret;
struct btrfs_ordered_extent *entry;

View File

@ -260,6 +260,7 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
#else
BUG();
#endif
break;
case BTRFS_BLOCK_GROUP_ITEM_KEY:
bi = btrfs_item_ptr(l, i,
struct btrfs_block_group_item);

View File

@ -2028,6 +2028,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
while (1) {
trans = btrfs_start_transaction(root, 0);
BUG_ON(IS_ERR(trans));
trans->block_rsv = rc->block_rsv;
ret = btrfs_block_rsv_check(trans, root, rc->block_rsv,
@ -2147,6 +2148,12 @@ again:
}
trans = btrfs_join_transaction(rc->extent_root, 1);
if (IS_ERR(trans)) {
if (!err)
btrfs_block_rsv_release(rc->extent_root,
rc->block_rsv, num_bytes);
return PTR_ERR(trans);
}
if (!err) {
if (num_bytes != rc->merging_rsv_size) {
@ -3222,6 +3229,7 @@ truncate:
trans = btrfs_join_transaction(root, 0);
if (IS_ERR(trans)) {
btrfs_free_path(path);
ret = PTR_ERR(trans);
goto out;
}
@ -3628,6 +3636,7 @@ int prepare_to_relocate(struct reloc_control *rc)
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root, 1);
BUG_ON(IS_ERR(trans));
btrfs_commit_transaction(trans, rc->extent_root);
return 0;
}
@ -3657,6 +3666,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
while (1) {
trans = btrfs_start_transaction(rc->extent_root, 0);
BUG_ON(IS_ERR(trans));
if (update_backref_cache(trans, &rc->backref_cache)) {
btrfs_end_transaction(trans, rc->extent_root);
@ -3804,7 +3814,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
/* get rid of pinned extents */
trans = btrfs_join_transaction(rc->extent_root, 1);
btrfs_commit_transaction(trans, rc->extent_root);
if (IS_ERR(trans))
err = PTR_ERR(trans);
else
btrfs_commit_transaction(trans, rc->extent_root);
out_free:
btrfs_free_block_rsv(rc->extent_root, rc->block_rsv);
btrfs_free_path(path);
@ -4022,6 +4035,7 @@ static noinline_for_stack int mark_garbage_root(struct btrfs_root *root)
int ret;
trans = btrfs_start_transaction(root->fs_info->tree_root, 0);
BUG_ON(IS_ERR(trans));
memset(&root->root_item.drop_progress, 0,
sizeof(root->root_item.drop_progress));
@ -4125,6 +4139,11 @@ int btrfs_recover_relocation(struct btrfs_root *root)
set_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root, 1);
if (IS_ERR(trans)) {
unset_reloc_control(rc);
err = PTR_ERR(trans);
goto out_free;
}
rc->merge_reloc_tree = 1;
@ -4154,9 +4173,13 @@ int btrfs_recover_relocation(struct btrfs_root *root)
unset_reloc_control(rc);
trans = btrfs_join_transaction(rc->extent_root, 1);
btrfs_commit_transaction(trans, rc->extent_root);
out:
if (IS_ERR(trans))
err = PTR_ERR(trans);
else
btrfs_commit_transaction(trans, rc->extent_root);
out_free:
kfree(rc);
out:
while (!list_empty(&reloc_roots)) {
reloc_root = list_entry(reloc_roots.next,
struct btrfs_root, root_list);

View File

@ -383,7 +383,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
struct btrfs_fs_devices **fs_devices)
{
substring_t args[MAX_OPT_ARGS];
char *opts, *p;
char *opts, *orig, *p;
int error = 0;
int intarg;
@ -397,6 +397,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
opts = kstrdup(options, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((p = strsep(&opts, ",")) != NULL) {
int token;
@ -432,7 +433,7 @@ static int btrfs_parse_early_options(const char *options, fmode_t flags,
}
out_free_opts:
kfree(opts);
kfree(orig);
out:
/*
* If no subvolume name is specified we use the default one. Allocate
@ -623,6 +624,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
btrfs_wait_ordered_extents(root, 0, 0);
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
ret = btrfs_commit_transaction(trans, root);
return ret;
}
@ -761,6 +764,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
}
btrfs_close_devices(fs_devices);
kfree(fs_info);
kfree(tree_root);
} else {
char b[BDEVNAME_SIZE];

View File

@ -1161,6 +1161,11 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
INIT_DELAYED_WORK(&ac->work, do_async_commit);
ac->root = root;
ac->newtrans = btrfs_join_transaction(root, 0);
if (IS_ERR(ac->newtrans)) {
int err = PTR_ERR(ac->newtrans);
kfree(ac);
return err;
}
/* take transaction reference */
mutex_lock(&root->fs_info->trans_mutex);

View File

@ -338,6 +338,12 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
}
dst_copy = kmalloc(item_size, GFP_NOFS);
src_copy = kmalloc(item_size, GFP_NOFS);
if (!dst_copy || !src_copy) {
btrfs_release_path(root, path);
kfree(dst_copy);
kfree(src_copy);
return -ENOMEM;
}
read_extent_buffer(eb, src_copy, src_ptr, item_size);
@ -665,6 +671,9 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
btrfs_dir_item_key_to_cpu(leaf, di, &location);
name_len = btrfs_dir_name_len(leaf, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name)
return -ENOMEM;
read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
btrfs_release_path(root, path);
@ -744,6 +753,9 @@ static noinline int backref_in_log(struct btrfs_root *log,
int match = 0;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
if (ret != 0)
goto out;
@ -967,6 +979,8 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
key.offset = (u64)-1;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
while (1) {
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
@ -1178,6 +1192,9 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
name_len = btrfs_dir_name_len(eb, di);
name = kmalloc(name_len, GFP_NOFS);
if (!name)
return -ENOMEM;
log_type = btrfs_dir_type(eb, di);
read_extent_buffer(eb, name, (unsigned long)(di + 1),
name_len);
@ -1692,6 +1709,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
root_owner = btrfs_header_owner(parent);
next = btrfs_find_create_tree_block(root, bytenr, blocksize);
if (!next)
return -ENOMEM;
if (*level == 1) {
wc->process_func(root, next, wc, ptr_gen);
@ -2032,6 +2051,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
wait_log_commit(trans, log_root_tree,
log_root_tree->log_transid);
mutex_unlock(&log_root_tree->log_mutex);
ret = 0;
goto out;
}
atomic_set(&log_root_tree->log_commit[index2], 1);
@ -2096,7 +2116,7 @@ out:
smp_mb();
if (waitqueue_active(&root->log_commit_wait[index1]))
wake_up(&root->log_commit_wait[index1]);
return 0;
return ret;
}
static void free_log_tree(struct btrfs_trans_handle *trans,
@ -2194,6 +2214,9 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
log = root->log_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
name, name_len, -1);
if (IS_ERR(di)) {
@ -2594,6 +2617,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
nr * sizeof(u32), GFP_NOFS);
if (!ins_data)
return -ENOMEM;
ins_sizes = (u32 *)ins_data;
ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
@ -2725,7 +2751,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
log = root->log_root;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
dst_path = btrfs_alloc_path();
if (!dst_path) {
btrfs_free_path(path);
return -ENOMEM;
}
min_key.objectid = inode->i_ino;
min_key.type = BTRFS_INODE_ITEM_KEY;
@ -3080,6 +3112,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
BUG_ON(!path);
trans = btrfs_start_transaction(fs_info->tree_root, 0);
BUG_ON(IS_ERR(trans));
wc.trans = trans;
wc.pin = 1;

View File

@ -1213,6 +1213,10 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
return -ENOMEM;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
btrfs_free_path(path);
return PTR_ERR(trans);
}
key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
key.type = BTRFS_DEV_ITEM_KEY;
key.offset = device->devid;
@ -1606,6 +1610,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
}
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
kfree(device);
ret = PTR_ERR(trans);
goto error;
}
lock_chunks(root);
device->writeable = 1;
@ -1873,7 +1883,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
return ret;
trans = btrfs_start_transaction(root, 0);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
lock_chunks(root);
@ -2047,7 +2057,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
BUG_ON(ret);
trans = btrfs_start_transaction(dev_root, 0);
BUG_ON(!trans);
BUG_ON(IS_ERR(trans));
ret = btrfs_grow_device(trans, device, old_size);
BUG_ON(ret);
@ -2213,6 +2223,11 @@ again:
/* Shrinking succeeded, else we would be at "done". */
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto done;
}
lock_chunks(root);
device->disk_total_bytes = new_size;

View File

@ -372,6 +372,10 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
GFP_KERNEL);
if (!ppace) {
cERROR(1, "DACL memory allocation error");
return;
}
for (i = 0; i < num_aces; ++i) {
ppace[i] = (struct cifs_ace *) (acl_base + acl_size);

View File

@ -136,9 +136,6 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
}
}
if (ses->status == CifsExiting)
return -EIO;
/*
* Give demultiplex thread up to 10 seconds to reconnect, should be
* greater than cifs socket timeout which is 7 seconds
@ -156,7 +153,7 @@ cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command)
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry || ses->status == CifsExiting) {
if (!tcon->retry) {
cFYI(1, "gave up waiting on reconnect in smb_init");
return -EHOSTDOWN;
}

View File

@ -337,8 +337,12 @@ cifs_echo_request(struct work_struct *work)
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
/* no need to ping if we got a response recently */
if (time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
/*
* We cannot send an echo until the NEGOTIATE_PROTOCOL request is done.
* Also, no need to ping if we got a response recently
*/
if (server->tcpStatus != CifsGood ||
time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
goto requeue_echo;
rc = CIFSSMBEcho(server);
@ -578,12 +582,12 @@ incomplete_rcv:
else if (reconnect == 1)
continue;
length += 4; /* account for rfc1002 hdr */
total_read += 4; /* account for rfc1002 hdr */
dump_smb(smb_buffer, length);
if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
dump_smb(smb_buffer, total_read);
if (checkSMB(smb_buffer, smb_buffer->Mid, total_read)) {
cifs_dump_mem("Bad SMB: ", smb_buffer,
total_read < 48 ? total_read : 48);
continue;
}
@ -633,11 +637,11 @@ incomplete_rcv:
mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
mid_entry->midState = MID_RESPONSE_RECEIVED;
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
#ifdef CONFIG_CIFS_STATS2
mid_entry->when_received = jiffies;
#endif
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
break;
}
mid_entry = NULL;

View File

@ -1662,10 +1662,10 @@ static ssize_t
cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
size_t total_written = 0;
unsigned int written = 0;
unsigned long num_pages, npages;
size_t copied, len, cur_len, i;
unsigned int written;
unsigned long num_pages, npages, i;
size_t copied, len, cur_len;
ssize_t total_written = 0;
struct kvec *to_send;
struct page **pages;
struct iov_iter it;
@ -1821,7 +1821,8 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
{
int rc;
int xid;
unsigned int total_read, bytes_read = 0;
ssize_t total_read;
unsigned int bytes_read = 0;
size_t len, cur_len;
int iov_offset = 0;
struct cifs_sb_info *cifs_sb;
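A tiny stand-alone illustration (plain userspace C, not CIFS code) of the signedness problem behind the type changes above: with an unsigned running total, a negative return value wraps to a huge positive number and can never be caught by a less-than-zero check, which is why the accumulators become ssize_t here.
#include <stdio.h>
#include <sys/types.h>
/* Stand-in for a transport call that can fail with a negative error. */
static long fake_write(void)
{
	return -5;
}
int main(void)
{
	size_t utotal = 0;	/* unsigned accumulator: the old bug */
	ssize_t stotal = 0;	/* signed accumulator: what the fix uses */
	long rc = fake_write();
	utotal += rc;		/* wraps; (utotal < 0) is never true */
	stotal += rc;		/* stays negative, error is visible */
	printf("unsigned total: %zu\n", utotal);
	printf("signed total:   %zd\n", stotal);
	return 0;
}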

View File

@ -359,6 +359,10 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
if (rc)
return rc;
/* enable signing if server requires it */
if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
mutex_lock(&server->srv_mutex);
mid = AllocMidQEntry(in_buf, server);
if (mid == NULL) {

View File

@ -124,7 +124,8 @@
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
#define FTRACE_EVENTS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
@ -140,7 +141,8 @@
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
#define TRACE_SYSCALLS() . = ALIGN(8); \
VMLINUX_SYMBOL(__start_syscalls_metadata) = .; \
*(__syscalls_metadata) \
VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
@ -165,10 +167,8 @@
CPU_KEEP(exit.data) \
MEM_KEEP(init.data) \
MEM_KEEP(exit.data) \
. = ALIGN(32); \
VMLINUX_SYMBOL(__start___tracepoints) = .; \
STRUCT_ALIGN(); \
*(__tracepoints) \
VMLINUX_SYMBOL(__stop___tracepoints) = .; \
/* implement dynamic printk debug */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___verbose) = .; \
@ -176,13 +176,7 @@
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
TRACE_PRINTKS() \
\
STRUCT_ALIGN(); \
FTRACE_EVENTS() \
\
STRUCT_ALIGN(); \
TRACE_SYSCALLS()
TRACE_PRINTKS()
/*
* Data section helpers
@ -220,6 +214,10 @@
VMLINUX_SYMBOL(__start_rodata) = .; \
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
*(__tracepoints_ptrs) /* Tracepoints: pointer array */\
VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .; \
*(__markers_strings) /* Markers: strings */ \
*(__tracepoints_strings)/* Tracepoints: strings */ \
} \
@ -482,6 +480,8 @@
KERNEL_CTORS() \
*(.init.rodata) \
MCOUNT_REC() \
FTRACE_EVENTS() \
TRACE_SYSCALLS() \
DEV_DISCARD(init.rodata) \
CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.rodata) \

View File

@ -57,7 +57,7 @@
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
* IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
*
* IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
*/
#define IRQF_DISABLED 0x00000020
#define IRQF_SAMPLE_RANDOM 0x00000040
@ -69,6 +69,7 @@
#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND)
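A hedged usage sketch of the new flag (hypothetical driver code, not part of this diff): IRQF_FORCE_RESUME only matters together with IRQF_NO_SUSPEND, so a driver whose line must stay enabled across suspend yet still be re-enabled on resume would request it like this.
#include <linux/interrupt.h>
/*
 * Hypothetical driver snippet: the IRQ stays enabled during suspend
 * (IRQF_NO_SUSPEND), and the core force-enables it again on resume
 * (IRQF_FORCE_RESUME) even though it was never marked IRQ_SUSPENDED.
 */
static irqreturn_t my_event_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
static int my_bind_event_irq(unsigned int irq, void *dev)
{
	return request_irq(irq, my_event_handler,
			   IRQF_NO_SUSPEND | IRQF_FORCE_RESUME,
			   "my-event-channel", dev);
}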

View File

@ -377,7 +377,7 @@ struct module
keeping pointers to this stuff */
char *args;
#ifdef CONFIG_TRACEPOINTS
struct tracepoint *tracepoints;
struct tracepoint * const *tracepoints_ptrs;
unsigned int num_tracepoints;
#endif
#ifdef HAVE_JUMP_LABEL
@ -389,7 +389,7 @@ struct module
unsigned int num_trace_bprintk_fmt;
#endif
#ifdef CONFIG_EVENT_TRACING
struct ftrace_event_call *trace_events;
struct ftrace_event_call **trace_events;
unsigned int num_trace_events;
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD

View File

@ -125,39 +125,37 @@ extern struct trace_event_functions enter_syscall_print_funcs;
extern struct trace_event_functions exit_syscall_print_funcs;
#define SYSCALL_TRACE_ENTER_EVENT(sname) \
static struct syscall_metadata \
__attribute__((__aligned__(4))) __syscall_meta_##sname; \
static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_enter_##sname = { \
.name = "sys_enter"#sname, \
.class = &event_class_syscall_enter, \
.event.funcs = &enter_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_enter_##sname = &event_enter_##sname; \
__TRACE_EVENT_FLAGS(enter_##sname, TRACE_EVENT_FL_CAP_ANY)
#define SYSCALL_TRACE_EXIT_EVENT(sname) \
static struct syscall_metadata \
__attribute__((__aligned__(4))) __syscall_meta_##sname; \
static struct syscall_metadata __syscall_meta_##sname; \
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) \
event_exit_##sname = { \
.name = "sys_exit"#sname, \
.class = &event_class_syscall_exit, \
.event.funcs = &exit_syscall_print_funcs, \
.data = (void *)&__syscall_meta_##sname,\
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) \
*__event_exit_##sname = &event_exit_##sname; \
__TRACE_EVENT_FLAGS(exit_##sname, TRACE_EVENT_FL_CAP_ANY)
#define SYSCALL_METADATA(sname, nb) \
SYSCALL_TRACE_ENTER_EVENT(sname); \
SYSCALL_TRACE_EXIT_EVENT(sname); \
static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta_##sname = { \
.name = "sys"#sname, \
.nb_args = nb, \
@ -166,14 +164,15 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.enter_event = &event_enter_##sname, \
.exit_event = &event_exit_##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
};
}; \
static struct syscall_metadata __used \
__attribute__((section("__syscalls_metadata"))) \
*__p_syscall_meta_##sname = &__syscall_meta_##sname;
#define SYSCALL_DEFINE0(sname) \
SYSCALL_TRACE_ENTER_EVENT(_##sname); \
SYSCALL_TRACE_EXIT_EVENT(_##sname); \
static struct syscall_metadata __used \
__attribute__((__aligned__(4))) \
__attribute__((section("__syscalls_metadata"))) \
__syscall_meta__##sname = { \
.name = "sys_"#sname, \
.nb_args = 0, \
@ -181,6 +180,9 @@ extern struct trace_event_functions exit_syscall_print_funcs;
.exit_event = &event_exit__##sname, \
.enter_fields = LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
}; \
static struct syscall_metadata __used \
__attribute__((section("__syscalls_metadata"))) \
*__p_syscall_meta_##sname = &__syscall_meta__##sname; \
asmlinkage long sys_##sname(void)
#else
#define SYSCALL_DEFINE0(name) asmlinkage long sys_##name(void)

View File

@ -33,12 +33,7 @@ struct tracepoint {
void (*regfunc)(void);
void (*unregfunc)(void);
struct tracepoint_func __rcu *funcs;
} __attribute__((aligned(32))); /*
* Aligned on 32 bytes because it is
* globally visible and gcc happily
* align these on the structure size.
* Keep in sync with vmlinux.lds.h.
*/
};
/*
* Connect a probe to a tracepoint.
@ -61,15 +56,15 @@ extern void tracepoint_probe_update_all(void);
struct tracepoint_iter {
struct module *module;
struct tracepoint *tracepoint;
struct tracepoint * const *tracepoint;
};
extern void tracepoint_iter_start(struct tracepoint_iter *iter);
extern void tracepoint_iter_next(struct tracepoint_iter *iter);
extern void tracepoint_iter_stop(struct tracepoint_iter *iter);
extern void tracepoint_iter_reset(struct tracepoint_iter *iter);
extern int tracepoint_get_iter_range(struct tracepoint **tracepoint,
struct tracepoint *begin, struct tracepoint *end);
extern int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
struct tracepoint * const *begin, struct tracepoint * const *end);
/*
* tracepoint_synchronize_unregister must be called between the last tracepoint
@ -84,11 +79,13 @@ static inline void tracepoint_synchronize_unregister(void)
#define PARAMS(args...) args
#ifdef CONFIG_TRACEPOINTS
extern void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end);
extern
void tracepoint_update_probe_range(struct tracepoint * const *begin,
struct tracepoint * const *end);
#else
static inline void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
static inline
void tracepoint_update_probe_range(struct tracepoint * const *begin,
struct tracepoint * const *end)
{ }
#endif /* CONFIG_TRACEPOINTS */
@ -174,12 +171,20 @@ do_trace: \
{ \
}
/*
* We have no guarantee that gcc and the linker won't up-align the tracepoint
* structures, so we create an array of pointers that will be used for iteration
* on the tracepoints.
*/
#define DEFINE_TRACE_FN(name, reg, unreg) \
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
__attribute__((section("__tracepoints"), aligned(32))) = \
{ __tpstrtab_##name, 0, reg, unreg, NULL }
__attribute__((section("__tracepoints"))) = \
{ __tpstrtab_##name, 0, reg, unreg, NULL }; \
static struct tracepoint * const __tracepoint_ptr_##name __used \
__attribute__((section("__tracepoints_ptrs"))) = \
&__tracepoint_##name;
#define DEFINE_TRACE(name) \
DEFINE_TRACE_FN(name, NULL, NULL);
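The comment above spells out the trick in words; below is a stand-alone sketch of the same pattern outside the kernel macros (every name here is illustrative, not from the patch). The objects may be padded or over-aligned by the compiler, but the dedicated section carries only pointers, so the start/stop symbols can be walked as an ordinary array.
struct widget {
	const char *name;
	int enabled;
};
/* Emit the object normally, plus one pointer to it in a packed section. */
#define DEFINE_WIDGET(n)						\
	static struct widget __widget_##n = { #n, 0 };			\
	static struct widget * const __widget_ptr_##n			\
	__attribute__((used, section("widget_ptrs"))) = &__widget_##n
/*
 * With a linker script providing:
 *   __start_widget_ptrs = .; *(widget_ptrs); __stop_widget_ptrs = .;
 */
extern struct widget * const __start_widget_ptrs[];
extern struct widget * const __stop_widget_ptrs[];
static void for_each_widget(void (*fn)(struct widget *))
{
	struct widget * const *iter;
	for (iter = __start_widget_ptrs; iter < __stop_widget_ptrs; iter++)
		fn(*iter);
}
This mirrors what the __tracepoints_ptrs section and its __start/__stop symbols in the vmlinux linker script do for the real tracepoints.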

View File

@ -446,14 +446,16 @@ static inline notrace int ftrace_get_offsets_##call( \
* .reg = ftrace_event_reg,
* };
*
* static struct ftrace_event_call __used
* __attribute__((__aligned__(4)))
* __attribute__((section("_ftrace_events"))) event_<call> = {
* static struct ftrace_event_call event_<call> = {
* .name = "<call>",
* .class = event_class_<template>,
* .event = &ftrace_event_type_<call>,
* .print_fmt = print_fmt_<call>,
* };
* // it's only safe to use pointers when doing linker tricks to
* // create an array.
* static struct ftrace_event_call __used
* __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
*
*/
@ -579,28 +581,28 @@ static struct ftrace_event_class __used event_class_##call = { \
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args) \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
static struct ftrace_event_call __used event_##call = { \
.name = #call, \
.class = &event_class_##template, \
.event.funcs = &ftrace_event_type_funcs_##template, \
.print_fmt = print_fmt_##template, \
};
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
\
static const char print_fmt_##call[] = print; \
\
static struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
static struct ftrace_event_call __used event_##call = { \
.name = #call, \
.class = &event_class_##template, \
.event.funcs = &ftrace_event_type_funcs_##call, \
.print_fmt = print_fmt_##call, \
}
}; \
static struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

View File

@ -252,13 +252,13 @@ struct cred *cred_alloc_blank(void)
#endif
atomic_set(&new->usage, 1);
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
if (security_cred_alloc_blank(new, GFP_KERNEL) < 0)
goto error;
#ifdef CONFIG_DEBUG_CREDENTIALS
new->magic = CRED_MAGIC;
#endif
return new;
error:
@ -657,6 +657,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
validate_creds(old);
*new = *old;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
get_group_info(new->group_info);
@ -674,8 +676,6 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
if (security_prepare_creds(new, old, GFP_KERNEL) < 0)
goto error;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
put_cred(old);
validate_creds(new);
return new;
@ -748,7 +748,11 @@ bool creds_are_invalid(const struct cred *cred)
if (cred->magic != CRED_MAGIC)
return true;
#ifdef CONFIG_SECURITY_SELINUX
if (selinux_is_enabled()) {
/*
* cred->security == NULL if security_cred_alloc_blank() or
* security_prepare_creds() returned an error.
*/
if (selinux_is_enabled() && cred->security) {
if ((unsigned long) cred->security < PAGE_SIZE)
return true;
if ((*(u32 *)cred->security & 0xffffff00) ==

View File

@ -359,8 +359,17 @@ EXPORT_SYMBOL(disable_irq);
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
if (resume)
if (resume) {
if (!(desc->status & IRQ_SUSPENDED)) {
if (!desc->action)
return;
if (!(desc->action->flags & IRQF_FORCE_RESUME))
return;
/* Pretend that it got disabled ! */
desc->depth++;
}
desc->status &= ~IRQ_SUSPENDED;
}
switch (desc->depth) {
case 0:

View File

@ -53,9 +53,6 @@ void resume_device_irqs(void)
for_each_irq_desc(irq, desc) {
unsigned long flags;
if (!(desc->status & IRQ_SUSPENDED))
continue;
raw_spin_lock_irqsave(&desc->lock, flags);
__enable_irq(desc, irq, true);
raw_spin_unlock_irqrestore(&desc->lock, flags);

View File

@ -2460,9 +2460,9 @@ static void find_module_sections(struct module *mod, struct load_info *info)
#endif
#ifdef CONFIG_TRACEPOINTS
mod->tracepoints = section_objs(info, "__tracepoints",
sizeof(*mod->tracepoints),
&mod->num_tracepoints);
mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
sizeof(*mod->tracepoints_ptrs),
&mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
mod->jump_entries = section_objs(info, "__jump_table",
@ -3393,7 +3393,7 @@ void module_layout(struct module *mod,
struct modversion_info *ver,
struct kernel_param *kp,
struct kernel_symbol *ks,
struct tracepoint *tp)
struct tracepoint * const *tp)
{
}
EXPORT_SYMBOL(module_layout);
@ -3407,8 +3407,8 @@ void module_update_tracepoints(void)
mutex_lock(&module_mutex);
list_for_each_entry(mod, &modules, list)
if (!mod->taints)
tracepoint_update_probe_range(mod->tracepoints,
mod->tracepoints + mod->num_tracepoints);
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
mutex_unlock(&module_mutex);
}
@ -3432,8 +3432,8 @@ int module_get_iter_tracepoints(struct tracepoint_iter *iter)
else if (iter_mod > iter->module)
iter->tracepoint = NULL;
found = tracepoint_get_iter_range(&iter->tracepoint,
iter_mod->tracepoints,
iter_mod->tracepoints
iter_mod->tracepoints_ptrs,
iter_mod->tracepoints_ptrs
+ iter_mod->num_tracepoints);
if (found) {
iter->module = iter_mod;

View File

@ -969,10 +969,14 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
unsigned long flags;
raw_local_irq_save(flags);
local_bh_disable();
lock_map_acquire(&timer->lockdep_map);
lock_map_release(&timer->lockdep_map);
local_bh_enable();
_local_bh_enable();
raw_local_irq_restore(flags);
#endif
/*
* don't use it in hardirq context, because it

View File

@ -1284,7 +1284,7 @@ trace_create_file_ops(struct module *mod)
static void trace_module_add_events(struct module *mod)
{
struct ftrace_module_file_ops *file_ops = NULL;
struct ftrace_event_call *call, *start, *end;
struct ftrace_event_call **call, **start, **end;
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
@ -1297,7 +1297,7 @@ static void trace_module_add_events(struct module *mod)
return;
for_each_event(call, start, end) {
__trace_add_event_call(call, mod,
__trace_add_event_call(*call, mod,
&file_ops->id, &file_ops->enable,
&file_ops->filter, &file_ops->format);
}
@ -1367,8 +1367,8 @@ static struct notifier_block trace_module_nb = {
.priority = 0,
};
extern struct ftrace_event_call __start_ftrace_events[];
extern struct ftrace_event_call __stop_ftrace_events[];
extern struct ftrace_event_call *__start_ftrace_events[];
extern struct ftrace_event_call *__stop_ftrace_events[];
static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
@ -1384,7 +1384,7 @@ __setup("trace_event=", setup_trace_event);
static __init int event_trace_init(void)
{
struct ftrace_event_call *call;
struct ftrace_event_call **call;
struct dentry *d_tracer;
struct dentry *entry;
struct dentry *d_events;
@ -1430,7 +1430,7 @@ static __init int event_trace_init(void)
pr_warning("tracing: Failed to allocate common fields");
for_each_event(call, __start_ftrace_events, __stop_ftrace_events) {
__trace_add_event_call(call, NULL, &ftrace_event_id_fops,
__trace_add_event_call(*call, NULL, &ftrace_event_id_fops,
&ftrace_enable_fops,
&ftrace_event_filter_fops,
&ftrace_event_format_fops);

View File

@ -161,13 +161,13 @@ struct ftrace_event_class event_class_ftrace_##call = { \
.fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
}; \
\
struct ftrace_event_call __used \
__attribute__((__aligned__(4))) \
__attribute__((section("_ftrace_events"))) event_##call = { \
struct ftrace_event_call __used event_##call = { \
.name = #call, \
.event.type = etype, \
.class = &event_class_ftrace_##call, \
.print_fmt = print, \
}; \
struct ftrace_event_call __used \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
#include "trace_entries.h"

View File

@ -55,20 +55,21 @@ struct ftrace_event_class event_class_syscall_exit = {
.raw_init = init_syscall_trace,
};
extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];
extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
static struct syscall_metadata **syscalls_metadata;
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
struct syscall_metadata *start;
struct syscall_metadata *stop;
struct syscall_metadata **start;
struct syscall_metadata **stop;
char str[KSYM_SYMBOL_LEN];
start = (struct syscall_metadata *)__start_syscalls_metadata;
stop = (struct syscall_metadata *)__stop_syscalls_metadata;
start = __start_syscalls_metadata;
stop = __stop_syscalls_metadata;
kallsyms_lookup(syscall, NULL, NULL, NULL, str);
for ( ; start < stop; start++) {
@ -78,8 +79,8 @@ static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
* with "SyS" instead of "sys", leading to an unwanted
* mismatch.
*/
if (start->name && !strcmp(start->name + 3, str + 3))
return start;
if ((*start)->name && !strcmp((*start)->name + 3, str + 3))
return *start;
}
return NULL;
}

View File

@ -27,8 +27,8 @@
#include <linux/sched.h>
#include <linux/jump_label.h>
extern struct tracepoint __start___tracepoints[];
extern struct tracepoint __stop___tracepoints[];
extern struct tracepoint * const __start___tracepoints_ptrs[];
extern struct tracepoint * const __stop___tracepoints_ptrs[];
/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;
@ -298,10 +298,10 @@ static void disable_tracepoint(struct tracepoint *elem)
*
* Updates the probe callback corresponding to a range of tracepoints.
*/
void
tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
void tracepoint_update_probe_range(struct tracepoint * const *begin,
struct tracepoint * const *end)
{
struct tracepoint *iter;
struct tracepoint * const *iter;
struct tracepoint_entry *mark_entry;
if (!begin)
@ -309,12 +309,12 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
mutex_lock(&tracepoints_mutex);
for (iter = begin; iter < end; iter++) {
mark_entry = get_tracepoint(iter->name);
mark_entry = get_tracepoint((*iter)->name);
if (mark_entry) {
set_tracepoint(&mark_entry, iter,
set_tracepoint(&mark_entry, *iter,
!!mark_entry->refcount);
} else {
disable_tracepoint(iter);
disable_tracepoint(*iter);
}
}
mutex_unlock(&tracepoints_mutex);
@ -326,8 +326,8 @@ tracepoint_update_probe_range(struct tracepoint *begin, struct tracepoint *end)
static void tracepoint_update_probes(void)
{
/* Core kernel tracepoints */
tracepoint_update_probe_range(__start___tracepoints,
__stop___tracepoints);
tracepoint_update_probe_range(__start___tracepoints_ptrs,
__stop___tracepoints_ptrs);
/* tracepoints in modules. */
module_update_tracepoints();
}
@ -514,8 +514,8 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
* Will return the first tracepoint in the range if the input tracepoint is
* NULL.
*/
int tracepoint_get_iter_range(struct tracepoint **tracepoint,
struct tracepoint *begin, struct tracepoint *end)
int tracepoint_get_iter_range(struct tracepoint * const **tracepoint,
struct tracepoint * const *begin, struct tracepoint * const *end)
{
if (!*tracepoint && begin != end) {
*tracepoint = begin;
@ -534,7 +534,8 @@ static void tracepoint_get_iter(struct tracepoint_iter *iter)
/* Core kernel tracepoints */
if (!iter->module) {
found = tracepoint_get_iter_range(&iter->tracepoint,
__start___tracepoints, __stop___tracepoints);
__start___tracepoints_ptrs,
__stop___tracepoints_ptrs);
if (found)
goto end;
}
@ -585,8 +586,8 @@ int tracepoint_module_notify(struct notifier_block *self,
switch (val) {
case MODULE_STATE_COMING:
case MODULE_STATE_GOING:
tracepoint_update_probe_range(mod->tracepoints,
mod->tracepoints + mod->num_tracepoints);
tracepoint_update_probe_range(mod->tracepoints_ptrs,
mod->tracepoints_ptrs + mod->num_tracepoints);
break;
}
return 0;

View File

@ -3198,7 +3198,11 @@ static void selinux_cred_free(struct cred *cred)
{
struct task_security_struct *tsec = cred->security;
BUG_ON((unsigned long) cred->security < PAGE_SIZE);
/*
* cred->security == NULL if security_cred_alloc_blank() or
* security_prepare_creds() returned an error.
*/
BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
cred->security = (void *) 0x7UL;
kfree(tsec);
}

View File

@ -50,7 +50,11 @@ static void aaci_ac97_select_codec(struct aaci *aaci, struct snd_ac97 *ac97)
if (v & SLFR_1RXV)
readl(aaci->base + AACI_SL1RX);
writel(maincr, aaci->base + AACI_MAINCR);
if (maincr != readl(aaci->base + AACI_MAINCR)) {
writel(maincr, aaci->base + AACI_MAINCR);
readl(aaci->base + AACI_MAINCR);
udelay(1);
}
}
/*
@ -993,6 +997,8 @@ static unsigned int __devinit aaci_size_fifo(struct aaci *aaci)
* disabling the channel doesn't clear the FIFO.
*/
writel(aaci->maincr & ~MAINCR_IE, aaci->base + AACI_MAINCR);
readl(aaci->base + AACI_MAINCR);
udelay(1);
writel(aaci->maincr, aaci->base + AACI_MAINCR);
/*

View File

@ -55,14 +55,13 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/moduleparam.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/rawmidi.h>
#include <linux/delay.h>
#include <asm/io.h>
/*
* globals
*/

View File

@ -87,7 +87,7 @@ ifeq ($(CONFIG_PSS_HAVE_BOOT),y)
$(obj)/bin2hex pss_synth < $< > $@
else
$(obj)/pss_boot.h:
( \
$(Q)( \
echo 'static unsigned char * pss_synth = NULL;'; \
echo 'static int pss_synthLen = 0;'; \
) > $@
@ -102,7 +102,7 @@ ifeq ($(CONFIG_TRIX_HAVE_BOOT),y)
$(obj)/hex2hex -i trix_boot < $< > $@
else
$(obj)/trix_boot.h:
( \
$(Q)( \
echo 'static unsigned char * trix_boot = NULL;'; \
echo 'static int trix_boot_len = 0;'; \
) > $@

View File

@ -85,6 +85,7 @@ struct conexant_spec {
unsigned int auto_mic;
int auto_mic_ext; /* autocfg.inputs[] index for ext mic */
unsigned int need_dac_fix;
hda_nid_t slave_dig_outs[2];
/* capture */
unsigned int num_adc_nids;
@ -127,6 +128,7 @@ struct conexant_spec {
unsigned int ideapad:1;
unsigned int thinkpad:1;
unsigned int hp_laptop:1;
unsigned int asus:1;
unsigned int ext_mic_present;
unsigned int recording;
@ -352,6 +354,8 @@ static int conexant_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid =
spec->dig_in_nid;
}
if (spec->slave_dig_outs[0])
codec->slave_dig_outs = spec->slave_dig_outs;
}
return 0;
@ -403,10 +407,16 @@ static int conexant_add_jack(struct hda_codec *codec,
struct conexant_spec *spec;
struct conexant_jack *jack;
const char *name;
int err;
int i, err;
spec = codec->spec;
snd_array_init(&spec->jacks, sizeof(*jack), 32);
jack = spec->jacks.list;
for (i = 0; i < spec->jacks.used; i++, jack++)
if (jack->nid == nid)
return 0; /* already present */
jack = snd_array_new(&spec->jacks);
name = (type == SND_JACK_HEADPHONE) ? "Headphone" : "Mic";
@ -2100,7 +2110,7 @@ static int patch_cxt5051(struct hda_codec *codec)
static hda_nid_t cxt5066_dac_nids[1] = { 0x10 };
static hda_nid_t cxt5066_adc_nids[3] = { 0x14, 0x15, 0x16 };
static hda_nid_t cxt5066_capsrc_nids[1] = { 0x17 };
#define CXT5066_SPDIF_OUT 0x21
static hda_nid_t cxt5066_digout_pin_nids[2] = { 0x20, 0x22 };
/* OLPC's microphone port is DC coupled for use with external sensors,
* therefore we use a 50% mic bias in order to center the input signal with
@ -2312,6 +2322,19 @@ static void cxt5066_ideapad_automic(struct hda_codec *codec)
}
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_asus_automic(struct hda_codec *codec)
{
unsigned int present;
present = snd_hda_jack_detect(codec, 0x1b);
snd_printdd("CXT5066: external microphone present=%d\n", present);
snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_CONNECT_SEL,
present ? 1 : 0);
}
/* toggle input of built-in digital mic and mic jack appropriately */
static void cxt5066_hp_laptop_automic(struct hda_codec *codec)
{
@ -2387,6 +2410,23 @@ static void cxt5066_hp_automute(struct hda_codec *codec)
cxt5066_update_speaker(codec);
}
/* Dispatch the right mic autoswitch function */
static void cxt5066_automic(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
if (spec->dell_vostro)
cxt5066_vostro_automic(codec);
else if (spec->ideapad)
cxt5066_ideapad_automic(codec);
else if (spec->thinkpad)
cxt5066_thinkpad_automic(codec);
else if (spec->hp_laptop)
cxt5066_hp_laptop_automic(codec);
else if (spec->asus)
cxt5066_asus_automic(codec);
}
/* unsolicited event for jack sensing */
static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
{
@ -2405,60 +2445,19 @@ static void cxt5066_olpc_unsol_event(struct hda_codec *codec, unsigned int res)
}
/* unsolicited event for jack sensing */
static void cxt5066_vostro_event(struct hda_codec *codec, unsigned int res)
static void cxt5066_unsol_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_vostro: unsol event %x (%x)\n", res, res >> 26);
snd_printdd("CXT5066: unsol event %x (%x)\n", res, res >> 26);
switch (res >> 26) {
case CONEXANT_HP_EVENT:
cxt5066_hp_automute(codec);
break;
case CONEXANT_MIC_EVENT:
cxt5066_vostro_automic(codec);
cxt5066_automic(codec);
break;
}
}
/* unsolicited event for jack sensing */
static void cxt5066_ideapad_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_ideapad: unsol event %x (%x)\n", res, res >> 26);
switch (res >> 26) {
case CONEXANT_HP_EVENT:
cxt5066_hp_automute(codec);
break;
case CONEXANT_MIC_EVENT:
cxt5066_ideapad_automic(codec);
break;
}
}
/* unsolicited event for jack sensing */
static void cxt5066_hp_laptop_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_hp_laptop: unsol event %x (%x)\n", res, res >> 26);
switch (res >> 26) {
case CONEXANT_HP_EVENT:
cxt5066_hp_automute(codec);
break;
case CONEXANT_MIC_EVENT:
cxt5066_hp_laptop_automic(codec);
break;
}
}
/* unsolicited event for jack sensing */
static void cxt5066_thinkpad_event(struct hda_codec *codec, unsigned int res)
{
snd_printdd("CXT5066_thinkpad: unsol event %x (%x)\n", res, res >> 26);
switch (res >> 26) {
case CONEXANT_HP_EVENT:
cxt5066_hp_automute(codec);
break;
case CONEXANT_MIC_EVENT:
cxt5066_thinkpad_automic(codec);
break;
}
}
static const struct hda_input_mux cxt5066_analog_mic_boost = {
.num_items = 5,
@ -2633,6 +2632,27 @@ static void cxt5066_olpc_capture_cleanup(struct hda_codec *codec)
spec->recording = 0;
}
static void conexant_check_dig_outs(struct hda_codec *codec,
hda_nid_t *dig_pins,
int num_pins)
{
struct conexant_spec *spec = codec->spec;
hda_nid_t *nid_loc = &spec->multiout.dig_out_nid;
int i;
for (i = 0; i < num_pins; i++, dig_pins++) {
unsigned int cfg = snd_hda_codec_get_pincfg(codec, *dig_pins);
if (get_defcfg_connect(cfg) == AC_JACK_PORT_NONE)
continue;
if (snd_hda_get_connections(codec, *dig_pins, nid_loc, 1) != 1)
continue;
if (spec->slave_dig_outs[0])
nid_loc++;
else
nid_loc = spec->slave_dig_outs;
}
}
static struct hda_input_mux cxt5066_capture_source = {
.num_items = 4,
.items = {
@ -3039,20 +3059,11 @@ static struct hda_verb cxt5066_init_verbs_hp_laptop[] = {
/* initialize jack-sensing, too */
static int cxt5066_init(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
snd_printdd("CXT5066: init\n");
conexant_init(codec);
if (codec->patch_ops.unsol_event) {
cxt5066_hp_automute(codec);
if (spec->dell_vostro)
cxt5066_vostro_automic(codec);
else if (spec->ideapad)
cxt5066_ideapad_automic(codec);
else if (spec->thinkpad)
cxt5066_thinkpad_automic(codec);
else if (spec->hp_laptop)
cxt5066_hp_laptop_automic(codec);
cxt5066_automic(codec);
}
cxt5066_set_mic_boost(codec);
return 0;
@ -3080,6 +3091,7 @@ enum {
CXT5066_DELL_VOSTRO, /* Dell Vostro 1015i */
CXT5066_IDEAPAD, /* Lenovo IdeaPad U150 */
CXT5066_THINKPAD, /* Lenovo ThinkPad T410s, others? */
CXT5066_ASUS, /* Asus K52JU, Lenovo G560 - Int mic at 0x1a and Ext mic at 0x1b */
CXT5066_HP_LAPTOP, /* HP Laptop */
CXT5066_MODELS
};
@ -3091,6 +3103,7 @@ static const char * const cxt5066_models[CXT5066_MODELS] = {
[CXT5066_DELL_VOSTRO] = "dell-vostro",
[CXT5066_IDEAPAD] = "ideapad",
[CXT5066_THINKPAD] = "thinkpad",
[CXT5066_ASUS] = "asus",
[CXT5066_HP_LAPTOP] = "hp-laptop",
};
@ -3102,7 +3115,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1993, "Asus U50F", CXT5066_ASUS),
SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
@ -3111,7 +3126,9 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
{}
};
@ -3133,7 +3150,8 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->multiout.max_channels = 2;
spec->multiout.num_dacs = ARRAY_SIZE(cxt5066_dac_nids);
spec->multiout.dac_nids = cxt5066_dac_nids;
spec->multiout.dig_out_nid = CXT5066_SPDIF_OUT;
conexant_check_dig_outs(codec, cxt5066_digout_pin_nids,
ARRAY_SIZE(cxt5066_digout_pin_nids));
spec->num_adc_nids = 1;
spec->adc_nids = cxt5066_adc_nids;
spec->capsrc_nids = cxt5066_capsrc_nids;
@ -3167,17 +3185,20 @@ static int patch_cxt5066(struct hda_codec *codec)
spec->num_init_verbs++;
spec->dell_automute = 1;
break;
case CXT5066_ASUS:
case CXT5066_HP_LAPTOP:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_hp_laptop_event;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->init_verbs[spec->num_init_verbs] =
cxt5066_init_verbs_hp_laptop;
spec->num_init_verbs++;
spec->hp_laptop = 1;
spec->hp_laptop = board_config == CXT5066_HP_LAPTOP;
spec->asus = board_config == CXT5066_ASUS;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
/* no S/PDIF out */
spec->multiout.dig_out_nid = 0;
if (board_config == CXT5066_HP_LAPTOP)
spec->multiout.dig_out_nid = 0;
/* input source automatically selected */
spec->input_mux = NULL;
spec->port_d_mode = 0;
@ -3207,7 +3228,7 @@ static int patch_cxt5066(struct hda_codec *codec)
break;
case CXT5066_DELL_VOSTRO:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_vostro_event;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->init_verbs[0] = cxt5066_init_verbs_vostro;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master_olpc;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
@ -3224,7 +3245,7 @@ static int patch_cxt5066(struct hda_codec *codec)
break;
case CXT5066_IDEAPAD:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_ideapad_event;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->init_verbs[0] = cxt5066_init_verbs_ideapad;
@ -3240,7 +3261,7 @@ static int patch_cxt5066(struct hda_codec *codec)
break;
case CXT5066_THINKPAD:
codec->patch_ops.init = cxt5066_init;
codec->patch_ops.unsol_event = cxt5066_thinkpad_event;
codec->patch_ops.unsol_event = cxt5066_unsol_event;
spec->mixers[spec->num_mixers++] = cxt5066_mixer_master;
spec->mixers[spec->num_mixers++] = cxt5066_mixers;
spec->init_verbs[0] = cxt5066_init_verbs_thinkpad;

View File

@ -92,6 +92,8 @@ struct oxygen_model {
void (*update_dac_volume)(struct oxygen *chip);
void (*update_dac_mute)(struct oxygen *chip);
void (*update_center_lfe_mix)(struct oxygen *chip, bool mixed);
unsigned int (*adjust_dac_routing)(struct oxygen *chip,
unsigned int play_routing);
void (*gpio_changed)(struct oxygen *chip);
void (*uart_input)(struct oxygen *chip);
void (*ac97_switch)(struct oxygen *chip,

View File

@ -180,6 +180,8 @@ void oxygen_update_dac_routing(struct oxygen *chip)
(1 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
(2 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
(3 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
if (chip->model.adjust_dac_routing)
reg_value = chip->model.adjust_dac_routing(chip, reg_value);
oxygen_write16_masked(chip, OXYGEN_PLAY_ROUTING, reg_value,
OXYGEN_PLAY_DAC0_SOURCE_MASK |
OXYGEN_PLAY_DAC1_SOURCE_MASK |

View File

@ -24,6 +24,11 @@
*
* SPI 0 -> CS4245
*
* I²S 1 -> CS4245
* I²S 2 -> CS4361 (center/LFE)
* I²S 3 -> CS4361 (surround)
* I²S 4 -> CS4361 (front)
*
* GPIO 3 <- ?
* GPIO 4 <- headphone detect
* GPIO 5 -> route input jack to line-in (0) or mic-in (1)
@ -36,6 +41,7 @@
* input 1 <- aux
* input 2 <- front mic
* input 4 <- line/mic
* DAC out -> headphones
* aux out -> front panel headphones
*/
@ -207,6 +213,35 @@ static void set_cs4245_adc_params(struct oxygen *chip,
cs4245_write_cached(chip, CS4245_ADC_CTRL, value);
}
static inline unsigned int shift_bits(unsigned int value,
unsigned int shift_from,
unsigned int shift_to,
unsigned int mask)
{
if (shift_from < shift_to)
return (value << (shift_to - shift_from)) & mask;
else
return (value >> (shift_from - shift_to)) & mask;
}
static unsigned int adjust_dg_dac_routing(struct oxygen *chip,
unsigned int play_routing)
{
return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
shift_bits(play_routing,
OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
OXYGEN_PLAY_DAC1_SOURCE_MASK) |
shift_bits(play_routing,
OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
OXYGEN_PLAY_DAC2_SOURCE_MASK) |
shift_bits(play_routing,
OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
OXYGEN_PLAY_DAC3_SOURCE_MASK);
}
static int output_switch_info(struct snd_kcontrol *ctl,
struct snd_ctl_elem_info *info)
{
@ -557,6 +592,7 @@ struct oxygen_model model_xonar_dg = {
.resume = dg_resume,
.set_dac_params = set_cs4245_dac_params,
.set_adc_params = set_cs4245_adc_params,
.adjust_dac_routing = adjust_dg_dac_routing,
.dump_registers = dump_cs4245_registers,
.model_data_size = sizeof(struct dg),
.device_config = PLAYBACK_0_TO_I2S |

View File

@ -22,7 +22,7 @@
#define __PDAUDIOCF_H
#include <sound/pcm.h>
#include <asm/io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>

View File

@ -23,8 +23,8 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/io.h>
#include <sound/core.h>
#include <asm/io.h>
#include "vxpocket.h"

View File

@ -153,7 +153,7 @@ static int cq93vc_resume(struct snd_soc_codec *codec)
static int cq93vc_probe(struct snd_soc_codec *codec)
{
struct davinci_vc *davinci_vc = codec->dev->platform_data;
struct davinci_vc *davinci_vc = snd_soc_codec_get_drvdata(codec);
davinci_vc->cq93vc.codec = codec;
codec->control_data = davinci_vc;

View File

@ -367,9 +367,12 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
return 0;
}
static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC;
static struct snd_soc_codec_driver cx20442_codec_dev = {
.probe = cx20442_codec_probe,
.remove = cx20442_codec_remove,
.reg_cache_default = &cx20442_reg,
.reg_cache_size = 1,
.reg_word_size = sizeof(u8),
.read = cx20442_read_reg_cache,

View File

@ -507,8 +507,6 @@ static int ams_delta_cx20442_init(struct snd_soc_pcm_runtime *rtd)
/* Set up digital mute if not provided by the codec */
if (!codec_dai->driver->ops) {
codec_dai->driver->ops = &ams_delta_dai_ops;
} else if (!codec_dai->driver->ops->digital_mute) {
codec_dai->driver->ops->digital_mute = ams_delta_digital_mute;
} else {
ams_delta_ops.startup = ams_delta_startup;
ams_delta_ops.shutdown = ams_delta_shutdown;

View File

@ -1664,9 +1664,6 @@ static int soc_probe_aux_dev(struct snd_soc_card *card, int num)
goto out;
found:
if (!try_module_get(codec->dev->driver->owner))
return -ENODEV;
ret = soc_probe_codec(card, codec);
if (ret < 0)
return ret;

View File

@ -1742,7 +1742,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
unsigned int val, val_mask;
unsigned int val;
int connect, change;
struct snd_soc_dapm_update update;
@ -1750,13 +1750,13 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
if (invert)
val = max - val;
val_mask = mask << shift;
mask = mask << shift;
val = val << shift;
mutex_lock(&widget->codec->mutex);
widget->value = val;
change = snd_soc_test_bits(widget->codec, reg, val_mask, val);
change = snd_soc_test_bits(widget->codec, reg, mask, val);
if (change) {
if (val)
/* new connection */

View File

@ -90,7 +90,7 @@ int __perf_evsel__read(struct perf_evsel *evsel,
int cpu, thread;
struct perf_counts_values *aggr = &evsel->counts->aggr, count;
aggr->val = 0;
aggr->val = aggr->ena = aggr->run = 0;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {