Sorgelig
2021-09-17 01:39:23 +08:00
parent 137491a75a
commit b6f2ca1c4d
404 changed files with 3610 additions and 1924 deletions

View File

@@ -29,7 +29,7 @@ recur_count
cpoint_name
Where in the kernel to trigger the action. It can be
one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_QUEUE_RQ,
IDE_CORE_CP, or DIRECT
cpoint_type

View File

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 14
SUBLEVEL = 0
SUBLEVEL = 5
EXTRAVERSION =
NAME = Opossums on Parade

View File

@@ -2832,7 +2832,7 @@
&emmc {
status = "okay";
clk-phase-mmc-hs200 = <180>, <180>;
clk-phase-mmc-hs200 = <210>, <228>;
};
&fsim0 {

View File

@@ -208,12 +208,12 @@
};
pinctrl_hvi3c3_default: hvi3c3_default {
function = "HVI3C3";
function = "I3C3";
groups = "HVI3C3";
};
pinctrl_hvi3c4_default: hvi3c4_default {
function = "HVI3C4";
function = "I3C4";
groups = "HVI3C4";
};

View File

@@ -92,6 +92,8 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio_leds>;
status = "okay"; /* Conflict with pwm0. */
red {
@@ -537,6 +539,10 @@
AT91_PIOA 19 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI) /* PA19 DAT2 periph A with pullup */
AT91_PIOA 20 AT91_PERIPH_A (AT91_PINCTRL_PULL_UP | AT91_PINCTRL_DRIVE_STRENGTH_HI)>; /* PA20 DAT3 periph A with pullup */
};
pinctrl_sdmmc0_cd: sdmmc0_cd {
atmel,pins =
<AT91_PIOA 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
};
sdmmc1 {
@@ -569,6 +575,14 @@
AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
};
leds {
pinctrl_gpio_leds: gpio_leds {
atmel,pins = <AT91_PIOB 11 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOB 12 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOB 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
};
}; /* pinctrl */
&pwm0 {
@@ -580,7 +594,7 @@
&sdmmc0 {
bus-width = <4>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdmmc0_default>;
pinctrl-0 = <&pinctrl_sdmmc0_default &pinctrl_sdmmc0_cd>;
status = "okay";
cd-gpios = <&pioA 23 GPIO_ACTIVE_LOW>;
disable-wp;

View File

@@ -57,6 +57,8 @@
};
spi0: spi@f0004000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0_cs>;
cs-gpios = <&pioD 13 0>, <0>, <0>, <&pioD 16 0>;
status = "okay";
};
@@ -169,6 +171,8 @@
};
spi1: spi@f8008000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi1_cs>;
cs-gpios = <&pioC 25 0>;
status = "okay";
};
@@ -248,6 +252,26 @@
<AT91_PIOE 3 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_gpio_leds: gpio_leds_default {
atmel,pins =
<AT91_PIOE 23 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOE 24 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_spi0_cs: spi0_cs_default {
atmel,pins =
<AT91_PIOD 13 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOD 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_spi1_cs: spi1_cs_default {
atmel,pins = <AT91_PIOC 25 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_vcc_mmc0_reg_gpio: vcc_mmc0_reg_gpio_default {
atmel,pins = <AT91_PIOE 2 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
};
};
};
@@ -339,6 +363,8 @@
vcc_mmc0_reg: fixedregulator_mmc0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_vcc_mmc0_reg_gpio>;
gpio = <&pioE 2 GPIO_ACTIVE_LOW>;
regulator-name = "mmc0-card-supply";
regulator-min-microvolt = <3300000>;
@@ -362,6 +388,9 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio_leds>;
status = "okay";
d2 {
label = "d2";

View File

@@ -90,6 +90,8 @@
};
spi1: spi@fc018000 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_spi0_cs>;
cs-gpios = <&pioB 21 0>;
status = "okay";
};
@@ -147,6 +149,19 @@
atmel,pins =
<AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
};
pinctrl_spi0_cs: spi0_cs_default {
atmel,pins =
<AT91_PIOB 21 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_gpio_leds: gpio_leds_default {
atmel,pins =
<AT91_PIOD 30 AT91_PERIPH_GPIO AT91_PINCTRL_NONE
AT91_PIOE 15 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
pinctrl_vcc_mmc1_reg: vcc_mmc1_reg {
atmel,pins =
<AT91_PIOE 4 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>;
};
};
};
};
@@ -252,6 +267,8 @@
leds {
compatible = "gpio-leds";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_gpio_leds>;
status = "okay";
d8 {
@@ -278,6 +295,8 @@
vcc_mmc1_reg: fixedregulator_mmc1 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_vcc_mmc1_reg>;
gpio = <&pioE 4 GPIO_ACTIVE_LOW>;
regulator-name = "VDD MCI1";
regulator-min-microvolt = <3300000>;

View File

@@ -304,8 +304,13 @@
"pp2", "ppmmu2", "pp4", "ppmmu4",
"pp5", "ppmmu5", "pp6", "ppmmu6";
resets = <&reset RESET_MALI>;
clocks = <&clkc CLKID_CLK81>, <&clkc CLKID_MALI>;
clock-names = "bus", "core";
assigned-clocks = <&clkc CLKID_MALI>;
assigned-clock-rates = <318750000>;
operating-points-v2 = <&gpu_opp_table>;
#cooling-cells = <2>; /* min followed by max */
};

View File

@@ -148,7 +148,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 0 1148 0>;
pwm-dutycycle-range = <100 0>;
@@ -232,7 +232,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 1 1148 0>;
pwm-dutycycle-range = <100 0>;

View File

@@ -34,6 +34,8 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 0 1148 0>;
pwm-dutycycle-range = <100 0>;
@@ -79,7 +81,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&vcc_5v>;
pwm-supply = <&vcc_5v>;
pwms = <&pwm_cd 1 1148 0>;
pwm-dutycycle-range = <100 0>;

View File

@@ -131,7 +131,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&p5v0>;
pwm-supply = <&p5v0>;
pwms = <&pwm_cd 0 12218 0>;
pwm-dutycycle-range = <91 0>;
@@ -163,7 +163,7 @@
regulator-min-microvolt = <860000>;
regulator-max-microvolt = <1140000>;
vin-supply = <&p5v0>;
pwm-supply = <&p5v0>;
pwms = <&pwm_cd 1 12218 0>;
pwm-dutycycle-range = <91 0>;

View File

@@ -102,7 +102,7 @@
#address-cells = <0>;
interrupt-controller;
reg = <0x11001000 0x1000>,
<0x11002000 0x1000>,
<0x11002000 0x2000>,
<0x11004000 0x2000>,
<0x11006000 0x2000>;
};

View File

@@ -135,6 +135,23 @@
pinctrl-0 = <&pcie_reset_pins &pcie_clkreq_pins>;
status = "okay";
reset-gpios = <&gpiosb 3 GPIO_ACTIVE_LOW>;
/*
* The U-Boot port for Turris Mox has a bug: it always expects that the "ranges" DT property
* contains exactly 2 ranges with 3 (child) address cells, 2 (parent) address cells and
* 2 size cells, and that the second range starts at a 16 MB offset. If these conditions
* are not met, U-Boot crashes while loading the kernel DTB file. The PCIe address space
* is 128 MB long, so the best split between MEM and IO is to use a fixed 16 MB window
* for IO and the remaining 112 MB (64+32+16) for MEM, even though the maximal IO size is
* just 64 kB. This bug is not present in U-Boot ports for other Armada 3700 devices and is
* fixed in U-Boot version 2021.07. See the relevant U-Boot commits (the last one contains the fix):
* https://source.denx.de/u-boot/u-boot/-/commit/cb2ddb291ee6fcbddd6d8f4ff49089dfe580f5d7
* https://source.denx.de/u-boot/u-boot/-/commit/c64ac3b3185aeb3846297ad7391fc6df8ecd73bf
* https://source.denx.de/u-boot/u-boot/-/commit/4a82fca8e330157081fc132a591ebd99ba02ee33
*/
#address-cells = <3>;
#size-cells = <2>;
ranges = <0x81000000 0 0xe8000000 0 0xe8000000 0 0x01000000 /* Port 0 IO */
0x82000000 0 0xe9000000 0 0xe9000000 0 0x07000000>; /* Port 0 MEM */
/* enabled by U-Boot if PCIe module is present */
status = "disabled";

View File

@@ -489,8 +489,15 @@
#interrupt-cells = <1>;
msi-parent = <&pcie0>;
msi-controller;
ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
/*
* The 128 MiB address range [0xe8000000-0xf0000000] is
* dedicated for PCIe and can be assigned to 8 windows
* with size a power of two. Use one 64 KiB window for
* IO at the end and the remaining seven windows
* (totaling 127 MiB) for MEM.
*/
ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x07f00000 /* Port 0 MEM */
0x81000000 0 0xefff0000 0 0xefff0000 0 0x00010000>; /* Port 0 IO */
interrupt-map-mask = <0 0 0 7>;
interrupt-map = <0 0 0 1 &pcie_intc 0>,
<0 0 0 2 &pcie_intc 1>,

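A tiny userspace sketch (illustrative arithmetic only, not kernel code; it assumes nothing beyond the numbers in the comment and ranges above) confirms that the seven power-of-two MEM windows total 127 MiB, that the 64 KiB IO window lands at the very end of the 128 MiB aperture, and that the 960 KiB in between stay unused:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t aperture_base = 0xe8000000, aperture_size = 128ULL << 20;
	const uint64_t mem_base = 0xe8000000, mem_size = 0x07f00000;
	const uint64_t io_base = 0xefff0000, io_size = 0x00010000;

	/* 64+32+16+8+4+2+1 MiB = seven power-of-two windows = 127 MiB */
	assert(mem_size == (64 + 32 + 16 + 8 + 4 + 2 + 1) * (1ULL << 20));
	/* the 64 KiB IO window sits at the very end of the 128 MiB aperture */
	assert(io_base + io_size == aperture_base + aperture_size);
	printf("MEM %llu MiB, IO %llu KiB, unused gap %llu KiB\n",
	       (unsigned long long)(mem_size >> 20),
	       (unsigned long long)(io_size >> 10),
	       (unsigned long long)((io_base - (mem_base + mem_size)) >> 10));
	return 0;
}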
View File

@@ -23,7 +23,7 @@ ap_h1_spi: &spi0 {};
adau7002: audio-codec-1 {
compatible = "adi,adau7002";
IOVDD-supply = <&pp1800_l15a>;
wakeup-delay-ms = <15>;
wakeup-delay-ms = <80>;
#sound-dai-cells = <0>;
};

View File

@@ -1437,9 +1437,9 @@
cpufreq_hw: cpufreq@18591000 {
compatible = "qcom,cpufreq-epss";
reg = <0 0x18591000 0 0x1000>,
<0 0x18592000 0 0x1000>,
<0 0x18593000 0 0x1000>;
reg = <0 0x18591100 0 0x900>,
<0 0x18592100 0 0x900>,
<0 0x18593100 0 0x900>;
clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
clock-names = "xo", "alternate";
#freq-domain-cells = <1>;

View File

@@ -2210,7 +2210,7 @@
<&gcc GCC_USB3_PHY_SEC_BCR>;
reset-names = "phy", "common";
usb_2_ssphy: lane@88eb200 {
usb_2_ssphy: lanes@88eb200 {
reg = <0 0x088eb200 0 0x200>,
<0 0x088eb400 0 0x200>,
<0 0x088eb800 0 0x800>;

View File

@@ -666,12 +666,10 @@
clocks = <&rpmhcc RPMH_IPA_CLK>;
clock-names = "core";
interconnects = <&aggre2_noc MASTER_IPA &gem_noc SLAVE_LLCC>,
<&mc_virt MASTER_LLCC &mc_virt SLAVE_EBI1>,
interconnects = <&aggre2_noc MASTER_IPA &mc_virt SLAVE_EBI1>,
<&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_IPA_CFG>;
interconnect-names = "ipa_to_llcc",
"llcc_to_ebi1",
"appss_to_ipa";
interconnect-names = "memory",
"config";
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;

View File

@@ -20,6 +20,7 @@
pinctrl-names = "default";
phy-handle = <&phy0>;
tx-internal-delay-ps = <2000>;
rx-internal-delay-ps = <1800>;
status = "okay";
phy0: ethernet-phy@0 {

View File

@@ -277,10 +277,6 @@
interrupt-parent = <&gpio1>;
interrupts = <28 IRQ_TYPE_LEVEL_LOW>;
/* Depends on LVDS */
max-clock = <135000000>;
min-vrefresh = <50>;
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
adi,input-clock = "1x";

View File

@@ -15,6 +15,7 @@
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kmemleak.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
@@ -1986,6 +1987,12 @@ static int finalize_hyp_mode(void)
if (ret)
return ret;
/*
* Exclude HYP BSS from kmemleak so that it doesn't get peeked
* at, which would end badly once the section is inaccessible.
* None of the other sections should ever be introspected.
*/
kmemleak_free_part(__hyp_bss_start, __hyp_bss_end - __hyp_bss_start);
ret = pkvm_mark_hyp_section(__hyp_bss);
if (ret)
return ret;

View File

@@ -60,6 +60,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
u32 val = cpuif->vgic_lr[lr];
u32 cpuid, intid = val & GICH_LR_VIRTUALID;
struct vgic_irq *irq;
bool deactivated;
/* Extract the source vCPU id from the LR */
cpuid = val & GICH_LR_PHYSID_CPUID;
@@ -75,7 +76,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
/* Always preserve the active bit, note deactivation */
deactivated = irq->active && !(val & GICH_LR_ACTIVE_BIT);
irq->active = !!(val & GICH_LR_ACTIVE_BIT);
if (irq->active && vgic_irq_is_sgi(intid))
@@ -96,36 +98,8 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
if (irq->config == VGIC_CONFIG_LEVEL && !(val & GICH_LR_STATE))
irq->pending_latch = false;
/*
* Level-triggered mapped IRQs are special because we only
* observe rising edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample
* the physical line and set the line level, because the
* device state could have changed or we simply need to
* process the still pending interrupt later.
*
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
*
* Another case is when the interrupt requires a helping hand
* on deactivation (no HW deactivation, for example).
*/
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (val & GICH_LR_PENDING_BIT) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
} else if (vgic_irq_needs_resampling(irq) &&
!(irq->active || irq->pending_latch)) {
resample = true;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
/* Handle resampling for mapped interrupts if required */
vgic_irq_handle_resampling(irq, deactivated, val & GICH_LR_PENDING_BIT);
raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);

View File

@@ -46,6 +46,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
u32 intid, cpuid;
struct vgic_irq *irq;
bool is_v2_sgi = false;
bool deactivated;
cpuid = val & GICH_LR_PHYSID_CPUID;
cpuid >>= GICH_LR_PHYSID_CPUID_SHIFT;
@@ -68,7 +69,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
raw_spin_lock(&irq->irq_lock);
/* Always preserve the active bit */
/* Always preserve the active bit, note deactivation */
deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
irq->active = !!(val & ICH_LR_ACTIVE_BIT);
if (irq->active && is_v2_sgi)
@@ -89,36 +91,8 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
irq->pending_latch = false;
/*
* Level-triggered mapped IRQs are special because we only
* observe rising edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample
* the physical line and set the line level, because the
* device state could have changed or we simply need to
* process the still pending interrupt later.
*
* If this causes us to lower the level, we have to also clear
* the physical active state, since we will otherwise never be
* told when the interrupt becomes asserted again.
*
* Another case is when the interrupt requires a helping hand
* on deactivation (no HW deactivation, for example).
*/
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (val & ICH_LR_PENDING_BIT) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
} else if (vgic_irq_needs_resampling(irq) &&
!(irq->active || irq->pending_latch)) {
resample = true;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
/* Handle resampling for mapped interrupts if required */
vgic_irq_handle_resampling(irq, deactivated, val & ICH_LR_PENDING_BIT);
raw_spin_unlock(&irq->irq_lock);
vgic_put_irq(vcpu->kvm, irq);

View File

@@ -1022,3 +1022,41 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
return map_is_active;
}
/*
* Level-triggered mapped IRQs are special because we only observe rising
* edges as input to the VGIC.
*
* If the guest never acked the interrupt we have to sample the physical
* line and set the line level, because the device state could have changed
* or we simply need to process the still pending interrupt later.
*
* We could also have entered the guest with the interrupt active+pending.
* On the next exit, we need to re-evaluate the pending state, as it could
* otherwise result in a spurious interrupt by injecting a now potentially
* stale pending state.
*
* If this causes us to lower the level, we have to also clear the physical
* active state, since we will otherwise never be told when the interrupt
* becomes asserted again.
*
* Another case is when the interrupt requires a helping hand on
* deactivation (no HW deactivation, for example).
*/
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending)
{
if (vgic_irq_is_mapped_level(irq)) {
bool resample = false;
if (unlikely(vgic_irq_needs_resampling(irq))) {
resample = !(irq->active || irq->pending_latch);
} else if (lr_pending || (lr_deactivated && irq->line_level)) {
irq->line_level = vgic_get_phys_line_level(irq);
resample = !irq->line_level;
}
if (resample)
vgic_irq_set_phys_active(irq, false);
}
}

View File

@@ -169,6 +169,8 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
unsigned long flags);
void vgic_kick_vcpus(struct kvm *kvm);
void vgic_irq_handle_resampling(struct vgic_irq *irq,
bool lr_deactivated, bool lr_pending);
int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
phys_addr_t addr, phys_addr_t alignment);

View File

@@ -26,6 +26,7 @@ config COLDFIRE
bool "Coldfire CPU family support"
select ARCH_HAVE_CUSTOM_GPIO_H
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_CAS
select CPU_HAS_NO_MULDIV64
select GENERIC_CSUM
select GPIOLIB
@@ -39,6 +40,7 @@ config M68000
bool
depends on !MMU
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_CAS
select CPU_HAS_NO_MULDIV64
select CPU_HAS_NO_UNALIGNED
select GENERIC_CSUM
@@ -54,6 +56,7 @@ config M68000
config MCPU32
bool
select CPU_HAS_NO_BITFIELDS
select CPU_HAS_NO_CAS
select CPU_HAS_NO_UNALIGNED
select CPU_NO_EFFICIENT_FFS
help
@@ -383,7 +386,7 @@ config ADVANCED
config RMW_INSNS
bool "Use read-modify-write instructions"
depends on ADVANCED
depends on ADVANCED && !CPU_HAS_NO_CAS
help
This allows using certain instructions that work with indivisible
read-modify-write bus cycles. While this is faster than the
@@ -450,6 +453,9 @@ config M68K_L2_CACHE
config CPU_HAS_NO_BITFIELDS
bool
config CPU_HAS_NO_CAS
bool
config CPU_HAS_NO_MULDIV64
bool

View File

@@ -78,7 +78,7 @@ int clk_enable(struct clk *clk)
unsigned long flags;
if (!clk)
return -EINVAL;
return 0;
spin_lock_irqsave(&clk_lock, flags);
if ((clk->enabled++ == 0) && clk->clk_ops)

View File

@@ -254,8 +254,8 @@ static void __exit nfeth_cleanup(void)
for (i = 0; i < MAX_UNIT; i++) {
if (nfeth_dev[i]) {
unregister_netdev(nfeth_dev[0]);
free_netdev(nfeth_dev[0]);
unregister_netdev(nfeth_dev[i]);
free_netdev(nfeth_dev[i]);
}
}
free_irq(nfEtherIRQ, nfeth_interrupt);

View File

@@ -48,7 +48,7 @@ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (arch_atomic_read(v))); \
: "di" (i), "2" (arch_atomic_read(v))); \
return t; \
}
@@ -63,7 +63,7 @@ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (arch_atomic_read(v))); \
: "di" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}

View File

@@ -26,7 +26,7 @@
extern char input_data[];
extern int input_len;
/* output_len is inserted by the linker possibly at an unaligned address */
extern __le32 output_len __aligned(1);
extern char output_len;
extern char _text, _end;
extern char _bss, _ebss;
extern char _startcode_end;

View File

@@ -957,6 +957,7 @@ struct kvm_arch{
atomic64_t cmma_dirty_pages;
/* subset of available cpu features enabled by user space */
DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* indexed by vcpu_idx */
DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
struct kvm_s390_gisa_interrupt gisa_int;
struct kvm_s390_pv pv;

View File

@@ -124,7 +124,8 @@ struct lowcore {
/* Restart function and parameter. */
__u64 restart_fn; /* 0x0370 */
__u64 restart_data; /* 0x0378 */
__u64 restart_source; /* 0x0380 */
__u32 restart_source; /* 0x0380 */
__u32 restart_flags; /* 0x0384 */
/* Address space pointer. */
__u64 kernel_asce; /* 0x0388 */

View File

@@ -26,6 +26,8 @@
#define _CIF_MCCK_GUEST BIT(CIF_MCCK_GUEST)
#define _CIF_DEDICATED_CPU BIT(CIF_DEDICATED_CPU)
#define RESTART_FLAG_CTLREGS _AC(1 << 0, U)
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

View File

@@ -116,6 +116,7 @@ int main(void)
OFFSET(__LC_RESTART_FN, lowcore, restart_fn);
OFFSET(__LC_RESTART_DATA, lowcore, restart_data);
OFFSET(__LC_RESTART_SOURCE, lowcore, restart_source);
OFFSET(__LC_RESTART_FLAGS, lowcore, restart_flags);
OFFSET(__LC_KERNEL_ASCE, lowcore, kernel_asce);
OFFSET(__LC_USER_ASCE, lowcore, user_asce);
OFFSET(__LC_LPP, lowcore, lpp);

View File

@@ -24,6 +24,7 @@
#include <linux/export.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/minmax.h>
#include <linux/debugfs.h>
#include <asm/debug.h>
@@ -92,6 +93,8 @@ static int debug_hex_ascii_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, const char *in_buf);
static int debug_sprintf_format_fn(debug_info_t *id, struct debug_view *view,
char *out_buf, debug_sprintf_entry_t *curr_event);
static void debug_areas_swap(debug_info_t *a, debug_info_t *b);
static void debug_events_append(debug_info_t *dest, debug_info_t *src);
/* globals */
@@ -311,24 +314,6 @@ static debug_info_t *debug_info_create(const char *name, int pages_per_area,
goto out;
rc->mode = mode & ~S_IFMT;
/* create root directory */
rc->debugfs_root_entry = debugfs_create_dir(rc->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = rc;
rc->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = rc;
rc->prev = debug_area_last;
}
debug_area_last = rc;
rc->next = NULL;
refcount_set(&rc->ref_count, 1);
out:
return rc;
@@ -388,27 +373,10 @@ static void debug_info_get(debug_info_t *db_info)
*/
static void debug_info_put(debug_info_t *db_info)
{
int i;
if (!db_info)
return;
if (refcount_dec_and_test(&db_info->ref_count)) {
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!db_info->views[i])
continue;
debugfs_remove(db_info->debugfs_entries[i]);
}
debugfs_remove(db_info->debugfs_root_entry);
if (db_info == debug_area_first)
debug_area_first = db_info->next;
if (db_info == debug_area_last)
debug_area_last = db_info->prev;
if (db_info->prev)
db_info->prev->next = db_info->next;
if (db_info->next)
db_info->next->prev = db_info->prev;
if (refcount_dec_and_test(&db_info->ref_count))
debug_info_free(db_info);
}
}
/*
@@ -632,6 +600,31 @@ static int debug_close(struct inode *inode, struct file *file)
return 0; /* success */
}
/* Create debugfs entries and add to internal list. */
static void _debug_register(debug_info_t *id)
{
/* create root directory */
id->debugfs_root_entry = debugfs_create_dir(id->name,
debug_debugfs_root_entry);
/* append new element to linked list */
if (!debug_area_first) {
/* first element in list */
debug_area_first = id;
id->prev = NULL;
} else {
/* append element to end of list */
debug_area_last->next = id;
id->prev = debug_area_last;
}
debug_area_last = id;
id->next = NULL;
debug_register_view(id, &debug_level_view);
debug_register_view(id, &debug_flush_view);
debug_register_view(id, &debug_pages_view);
}
/**
* debug_register_mode() - creates and initializes debug area.
*
@@ -661,19 +654,16 @@ debug_info_t *debug_register_mode(const char *name, int pages_per_area,
if ((uid != 0) || (gid != 0))
pr_warn("Root becomes the owner of all s390dbf files in sysfs\n");
BUG_ON(!initialized);
mutex_lock(&debug_mutex);
/* create new debug_info */
rc = debug_info_create(name, pages_per_area, nr_areas, buf_size, mode);
if (!rc)
goto out;
debug_register_view(rc, &debug_level_view);
debug_register_view(rc, &debug_flush_view);
debug_register_view(rc, &debug_pages_view);
out:
if (!rc)
if (rc) {
mutex_lock(&debug_mutex);
_debug_register(rc);
mutex_unlock(&debug_mutex);
} else {
pr_err("Registering debug feature %s failed\n", name);
mutex_unlock(&debug_mutex);
}
return rc;
}
EXPORT_SYMBOL(debug_register_mode);
@@ -702,6 +692,27 @@ debug_info_t *debug_register(const char *name, int pages_per_area,
}
EXPORT_SYMBOL(debug_register);
/* Remove debugfs entries and remove from internal list. */
static void _debug_unregister(debug_info_t *id)
{
int i;
for (i = 0; i < DEBUG_MAX_VIEWS; i++) {
if (!id->views[i])
continue;
debugfs_remove(id->debugfs_entries[i]);
}
debugfs_remove(id->debugfs_root_entry);
if (id == debug_area_first)
debug_area_first = id->next;
if (id == debug_area_last)
debug_area_last = id->prev;
if (id->prev)
id->prev->next = id->next;
if (id->next)
id->next->prev = id->prev;
}
/**
* debug_unregister() - give back debug area.
*
@@ -715,8 +726,10 @@ void debug_unregister(debug_info_t *id)
if (!id)
return;
mutex_lock(&debug_mutex);
debug_info_put(id);
_debug_unregister(id);
mutex_unlock(&debug_mutex);
debug_info_put(id);
}
EXPORT_SYMBOL(debug_unregister);
@@ -726,35 +739,28 @@ EXPORT_SYMBOL(debug_unregister);
*/
static int debug_set_size(debug_info_t *id, int nr_areas, int pages_per_area)
{
debug_entry_t ***new_areas;
debug_info_t *new_id;
unsigned long flags;
int rc = 0;
if (!id || (nr_areas <= 0) || (pages_per_area < 0))
return -EINVAL;
if (pages_per_area > 0) {
new_areas = debug_areas_alloc(pages_per_area, nr_areas);
if (!new_areas) {
pr_info("Allocating memory for %i pages failed\n",
pages_per_area);
rc = -ENOMEM;
goto out;
}
} else {
new_areas = NULL;
new_id = debug_info_alloc("", pages_per_area, nr_areas, id->buf_size,
id->level, ALL_AREAS);
if (!new_id) {
pr_info("Allocating memory for %i pages failed\n",
pages_per_area);
return -ENOMEM;
}
spin_lock_irqsave(&id->lock, flags);
debug_areas_free(id);
id->areas = new_areas;
id->nr_areas = nr_areas;
id->pages_per_area = pages_per_area;
id->active_area = 0;
memset(id->active_entries, 0, sizeof(int)*id->nr_areas);
memset(id->active_pages, 0, sizeof(int)*id->nr_areas);
debug_events_append(new_id, id);
debug_areas_swap(new_id, id);
debug_info_free(new_id);
spin_unlock_irqrestore(&id->lock, flags);
pr_info("%s: set new size (%i pages)\n", id->name, pages_per_area);
out:
return rc;
return 0;
}
/**
@@ -821,6 +827,42 @@ static inline debug_entry_t *get_active_entry(debug_info_t *id)
id->active_entries[id->active_area]);
}
/* Swap debug areas of a and b. */
static void debug_areas_swap(debug_info_t *a, debug_info_t *b)
{
swap(a->nr_areas, b->nr_areas);
swap(a->pages_per_area, b->pages_per_area);
swap(a->areas, b->areas);
swap(a->active_area, b->active_area);
swap(a->active_pages, b->active_pages);
swap(a->active_entries, b->active_entries);
}
/* Append all debug events in active area from source to destination log. */
static void debug_events_append(debug_info_t *dest, debug_info_t *src)
{
debug_entry_t *from, *to, *last;
if (!src->areas || !dest->areas)
return;
/* Loop over all entries in src, starting with oldest. */
from = get_active_entry(src);
last = from;
do {
if (from->clock != 0LL) {
to = get_active_entry(dest);
memset(to, 0, dest->entry_size);
memcpy(to, from, min(src->entry_size,
dest->entry_size));
proceed_active_entry(dest);
}
proceed_active_entry(src);
from = get_active_entry(src);
} while (from != last);
}
/*
* debug_finish_entry:
* - set timestamp, caller address, cpu number etc.

View File

@@ -624,12 +624,15 @@ ENTRY(mcck_int_handler)
4: j 4b
ENDPROC(mcck_int_handler)
#
# PSW restart interrupt handler
#
ENTRY(restart_int_handler)
ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
stg %r15,__LC_SAVE_AREA_RESTART
TSTMSK __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
jz 0f
la %r15,4095
lctlg %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0: larl %r15,.Lstosm_tmp
stosm 0(%r15),0x04 # turn dat on, keep irqs off
lg %r15,__LC_RESTART_STACK
xc STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
@@ -638,7 +641,7 @@ ENTRY(restart_int_handler)
xc 0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
lg %r1,__LC_RESTART_FN # load fn, parm & source cpu
lg %r2,__LC_RESTART_DATA
lg %r3,__LC_RESTART_SOURCE
lgf %r3,__LC_RESTART_SOURCE
ltgr %r3,%r3 # test source cpu address
jm 1f # negative -> skip source stop
0: sigp %r4,%r3,SIGP_SENSE # sigp sense to source cpu

View File

@@ -179,8 +179,6 @@ static inline int __diag308(unsigned long subcode, void *addr)
int diag308(unsigned long subcode, void *addr)
{
if (IS_ENABLED(CONFIG_KASAN))
__arch_local_irq_stosm(0x04); /* enable DAT */
diag_stat_inc(DIAG_STAT_X308);
return __diag308(subcode, addr);
}
@@ -1843,7 +1841,6 @@ static struct kobj_attribute on_restart_attr = __ATTR_RW(on_restart);
static void __do_restart(void *ignore)
{
__arch_local_irq_stosm(0x04); /* enable DAT */
smp_send_stop();
#ifdef CONFIG_CRASH_DUMP
crash_kexec(NULL);

View File

@@ -263,7 +263,6 @@ static void __do_machine_kexec(void *data)
*/
static void __machine_kexec(void *data)
{
__arch_local_irq_stosm(0x04); /* enable DAT */
pfault_fini();
tracing_off();
debug_locks_off();

View File

@@ -421,7 +421,7 @@ static void __init setup_lowcore_dat_off(void)
lc->restart_stack = (unsigned long) restart_stack;
lc->restart_fn = (unsigned long) do_restart;
lc->restart_data = 0;
lc->restart_source = -1UL;
lc->restart_source = -1U;
mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
if (!mcck_stack)
@@ -450,12 +450,19 @@ static void __init setup_lowcore_dat_off(void)
static void __init setup_lowcore_dat_on(void)
{
struct lowcore *lc = lowcore_ptr[0];
__ctl_clear_bit(0, 28);
S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
__ctl_store(S390_lowcore.cregs_save_area, 0, 15);
__ctl_set_bit(0, 28);
mem_assign_absolute(S390_lowcore.restart_flags, RESTART_FLAG_CTLREGS);
mem_assign_absolute(S390_lowcore.program_new_psw, lc->program_new_psw);
memcpy_absolute(&S390_lowcore.cregs_save_area, lc->cregs_save_area,
sizeof(S390_lowcore.cregs_save_area));
}
static struct resource code_resource = {

View File

@@ -252,6 +252,7 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
lc->cpu_nr = cpu;
lc->restart_flags = RESTART_FLAG_CTLREGS;
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->percpu_offset = __per_cpu_offset[cpu];
@@ -297,7 +298,7 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
lc->restart_stack = lc->nodat_stack;
lc->restart_fn = (unsigned long) func;
lc->restart_data = (unsigned long) data;
lc->restart_source = -1UL;
lc->restart_source = -1U;
pcpu_sigp_retry(pcpu, SIGP_RESTART, 0);
}
@@ -311,12 +312,12 @@ static void __pcpu_delegate(pcpu_delegate_fn *func, void *data)
func(data); /* should not return */
}
static void __no_sanitize_address pcpu_delegate(struct pcpu *pcpu,
pcpu_delegate_fn *func,
void *data, unsigned long stack)
static void pcpu_delegate(struct pcpu *pcpu,
pcpu_delegate_fn *func,
void *data, unsigned long stack)
{
struct lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
unsigned long source_cpu = stap();
unsigned int source_cpu = stap();
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
if (pcpu->address == source_cpu) {
@@ -569,6 +570,9 @@ static void smp_ctl_bit_callback(void *info)
__ctl_load(cregs, 0, 15);
}
static DEFINE_SPINLOCK(ctl_lock);
static unsigned long ctlreg;
/*
* Set a bit in a control register of all cpus
*/
@@ -576,6 +580,11 @@ void smp_ctl_set_bit(int cr, int bit)
{
struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };
spin_lock(&ctl_lock);
memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
__set_bit(bit, &ctlreg);
memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
spin_unlock(&ctl_lock);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);
@@ -587,6 +596,11 @@ void smp_ctl_clear_bit(int cr, int bit)
{
struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };
spin_lock(&ctl_lock);
memcpy_absolute(&ctlreg, &S390_lowcore.cregs_save_area[cr], sizeof(ctlreg));
__clear_bit(bit, &ctlreg);
memcpy_absolute(&S390_lowcore.cregs_save_area[cr], &ctlreg, sizeof(ctlreg));
spin_unlock(&ctl_lock);
on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
@@ -895,14 +909,13 @@ static void smp_init_secondary(void)
/*
* Activate a secondary processor.
*/
static void __no_sanitize_address smp_start_secondary(void *cpuvoid)
static void smp_start_secondary(void *cpuvoid)
{
S390_lowcore.restart_stack = (unsigned long) restart_stack;
S390_lowcore.restart_fn = (unsigned long) do_restart;
S390_lowcore.restart_data = 0;
S390_lowcore.restart_source = -1UL;
__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
__load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_DAT);
S390_lowcore.restart_source = -1U;
S390_lowcore.restart_flags = 0;
call_on_stack_noreturn(smp_init_secondary, S390_lowcore.kernel_stack);
}

View File

@@ -419,13 +419,13 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
set_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
@@ -3050,18 +3050,18 @@ int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
static void __airqs_kick_single_vcpu(struct kvm *kvm, u8 deliverable_mask)
{
int vcpu_id, online_vcpus = atomic_read(&kvm->online_vcpus);
int vcpu_idx, online_vcpus = atomic_read(&kvm->online_vcpus);
struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
struct kvm_vcpu *vcpu;
for_each_set_bit(vcpu_id, kvm->arch.idle_mask, online_vcpus) {
vcpu = kvm_get_vcpu(kvm, vcpu_id);
for_each_set_bit(vcpu_idx, kvm->arch.idle_mask, online_vcpus) {
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (psw_ioint_disabled(vcpu))
continue;
deliverable_mask &= (u8)(vcpu->arch.sie_block->gcr[6] >> 24);
if (deliverable_mask) {
/* lately kicked but not yet running */
if (test_and_set_bit(vcpu_id, gi->kicked_mask))
if (test_and_set_bit(vcpu_idx, gi->kicked_mask))
return;
kvm_s390_vcpu_wakeup(vcpu);
return;

View File

@@ -4044,7 +4044,7 @@ static int vcpu_pre_run(struct kvm_vcpu *vcpu)
kvm_s390_patch_guest_per_regs(vcpu);
}
clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.gisa_int.kicked_mask);
clear_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.gisa_int.kicked_mask);
vcpu->arch.sie_block->icptcode = 0;
cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);

View File

@@ -79,7 +79,7 @@ static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
return test_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
return test_bit(kvm_vcpu_get_idx(vcpu), vcpu->kvm->arch.idle_mask);
}
static inline int kvm_is_ucontrol(struct kvm *kvm)

View File

@@ -107,6 +107,9 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
}
/*
* The first 1MB of 1:1 mapping is mapped with 4KB pages
*/
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -157,30 +160,26 @@ static void __init kasan_early_pgtable_populate(unsigned long address,
pm_dir = pmd_offset(pu_dir, address);
if (pmd_none(*pm_dir)) {
if (mode == POPULATE_ZERO_SHADOW &&
IS_ALIGNED(address, PMD_SIZE) &&
if (IS_ALIGNED(address, PMD_SIZE) &&
end - address >= PMD_SIZE) {
pmd_populate(&init_mm, pm_dir,
kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
/* the first megabyte of 1:1 is mapped with 4k pages */
if (has_edat && address && end - address >= PMD_SIZE &&
mode != POPULATE_ZERO_SHADOW) {
void *page;
if (mode == POPULATE_ZERO_SHADOW) {
pmd_populate(&init_mm, pm_dir, kasan_early_shadow_pte);
address = (address + PMD_SIZE) & PMD_MASK;
continue;
} else if (has_edat && address) {
void *page;
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
if (mode == POPULATE_ONE2ONE) {
page = (void *)address;
} else {
page = kasan_early_alloc_segment();
memset(page, 0, _SEGMENT_SIZE);
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pmd_val(*pm_dir) = __pa(page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
pt_dir = kasan_early_pte_alloc();
pmd_populate(&init_mm, pm_dir, pt_dir);
} else if (pmd_large(*pm_dir)) {

View File

@@ -661,9 +661,10 @@ int zpci_enable_device(struct zpci_dev *zdev)
{
int rc;
rc = clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES);
if (rc)
if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
rc = -EIO;
goto out;
}
rc = zpci_dma_init_device(zdev);
if (rc)
@@ -684,7 +685,7 @@ int zpci_disable_device(struct zpci_dev *zdev)
* The zPCI function may already be disabled by the platform; this is
* detected in clp_disable_fh(), which then becomes a no-op.
*/
return clp_disable_fh(zdev);
return clp_disable_fh(zdev) ? -EIO : 0;
}
/**

View File

@@ -213,15 +213,19 @@ out:
}
static int clp_refresh_fh(u32 fid);
/*
* Enable/Disable a given PCI function and update its function handle if
* necessary
/**
* clp_set_pci_fn() - Execute a command on a PCI function
* @zdev: Function that will be affected
* @nr_dma_as: DMA address space number
* @command: The command code to execute
*
* Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
* > 0 for non-success platform responses
*/
static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
{
struct clp_req_rsp_set_pci *rrb;
int rc, retries = 100;
u32 fid = zdev->fid;
rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
@@ -245,17 +249,16 @@ static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
}
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
zpci_err("Set PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
}
if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
zdev->fh = rrb->response.fh;
} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY &&
rrb->response.fh == 0) {
} else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
/* Function is already in desired state - update handle */
rc = clp_refresh_fh(fid);
rc = clp_refresh_fh(zdev->fid);
} else {
zpci_err("Set PCI FN:\n");
zpci_err_clp(rrb->response.hdr.rsp, rc);
if (!rc)
rc = rrb->response.hdr.rsp;
}
clp_free_block(rrb);
return rc;
@@ -301,17 +304,13 @@ int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
if (rc)
goto out;
if (zpci_use_mio(zdev)) {
if (!rc && zpci_use_mio(zdev)) {
rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
zdev->fid, zdev->fh, rc);
if (rc)
clp_disable_fh(zdev);
}
out:
return rc;
}

View File

@@ -849,6 +849,8 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
return -EINVAL;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
if (unlikely(tail > 0 && walk.nbytes < walk.total)) {
int blocks = DIV_ROUND_UP(req->cryptlen, AES_BLOCK_SIZE) - 2;
@@ -862,7 +864,10 @@ static int xts_crypt(struct skcipher_request *req, bool encrypt)
skcipher_request_set_crypt(&subreq, req->src, req->dst,
blocks * AES_BLOCK_SIZE, req->iv);
req = &subreq;
err = skcipher_walk_virt(&walk, req, false);
if (err)
return err;
} else {
tail = 0;
}

View File

@@ -3838,6 +3838,22 @@ clear_attr_update:
return ret;
}
static void
pmu_iio_cleanup_mapping(struct intel_uncore_type *type, struct attribute_group *ag)
{
struct attribute **attr = ag->attrs;
if (!attr)
return;
for (; *attr; attr++)
kfree((*attr)->name);
kfree(attr_to_ext_attr(*ag->attrs));
kfree(ag->attrs);
ag->attrs = NULL;
kfree(type->topology);
}
static int skx_iio_set_mapping(struct intel_uncore_type *type)
{
return pmu_iio_set_mapping(type, &skx_iio_mapping_group);
@@ -3845,17 +3861,7 @@ static int skx_iio_set_mapping(struct intel_uncore_type *type)
static void skx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
struct attribute **attr = skx_iio_mapping_group.attrs;
if (!attr)
return;
for (; *attr; attr++)
kfree((*attr)->name);
kfree(attr_to_ext_attr(*skx_iio_mapping_group.attrs));
kfree(skx_iio_mapping_group.attrs);
skx_iio_mapping_group.attrs = NULL;
kfree(type->topology);
pmu_iio_cleanup_mapping(type, &skx_iio_mapping_group);
}
static struct intel_uncore_type skx_uncore_iio = {
@@ -4501,6 +4507,11 @@ static int snr_iio_set_mapping(struct intel_uncore_type *type)
return pmu_iio_set_mapping(type, &snr_iio_mapping_group);
}
static void snr_iio_cleanup_mapping(struct intel_uncore_type *type)
{
pmu_iio_cleanup_mapping(type, &snr_iio_mapping_group);
}
static struct intel_uncore_type snr_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -4517,7 +4528,7 @@ static struct intel_uncore_type snr_uncore_iio = {
.attr_update = snr_iio_attr_update,
.get_topology = snr_iio_get_topology,
.set_mapping = snr_iio_set_mapping,
.cleanup_mapping = skx_iio_cleanup_mapping,
.cleanup_mapping = snr_iio_cleanup_mapping,
};
static struct intel_uncore_type snr_uncore_irp = {
@@ -5092,6 +5103,11 @@ static int icx_iio_set_mapping(struct intel_uncore_type *type)
return pmu_iio_set_mapping(type, &icx_iio_mapping_group);
}
static void icx_iio_cleanup_mapping(struct intel_uncore_type *type)
{
pmu_iio_cleanup_mapping(type, &icx_iio_mapping_group);
}
static struct intel_uncore_type icx_uncore_iio = {
.name = "iio",
.num_counters = 4,
@@ -5109,7 +5125,7 @@ static struct intel_uncore_type icx_uncore_iio = {
.attr_update = icx_iio_attr_update,
.get_topology = icx_iio_get_topology,
.set_mapping = icx_iio_set_mapping,
.cleanup_mapping = skx_iio_cleanup_mapping,
.cleanup_mapping = icx_iio_cleanup_mapping,
};
static struct intel_uncore_type icx_uncore_irp = {

View File

@@ -265,6 +265,7 @@ enum mcp_flags {
MCP_TIMESTAMP = BIT(0), /* log time stamp */
MCP_UC = BIT(1), /* log uncorrected errors */
MCP_DONTLOG = BIT(2), /* only clear, don't log */
MCP_QUEUE_LOG = BIT(3), /* only queue to genpool */
};
bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b);

View File

@@ -817,7 +817,10 @@ log_it:
if (mca_cfg.dont_log_ce && !mce_usable_address(&m))
goto clear_it;
mce_log(&m);
if (flags & MCP_QUEUE_LOG)
mce_gen_pool_add(&m);
else
mce_log(&m);
clear_it:
/*
@@ -1639,10 +1642,12 @@ static void __mcheck_cpu_init_generic(void)
m_fl = MCP_DONTLOG;
/*
* Log the machine checks left over from the previous reset.
* Log the machine checks left over from the previous reset. Log them
* only, do not start processing them. That will happen in mcheck_late_init()
* when all consumers have been registered on the notifier chain.
*/
bitmap_fill(all_banks, MAX_NR_BANKS);
machine_check_poll(MCP_UC | m_fl, &all_banks);
machine_check_poll(MCP_UC | MCP_QUEUE_LOG | m_fl, &all_banks);
cr4_set_bits(X86_CR4_MCE);

View File

@@ -388,10 +388,11 @@ static const struct dmi_system_id reboot_dmi_table[] __initconst = {
},
{ /* Handle problems with rebooting on the OptiPlex 990. */
.callback = set_pci_reboot,
.ident = "Dell OptiPlex 990",
.ident = "Dell OptiPlex 990 BIOS A0x",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
DMI_MATCH(DMI_BIOS_VERSION, "A0"),
},
},
{ /* Handle problems with rebooting on Dell 300's */

View File

@@ -323,12 +323,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception)
{
/* Check if guest physical address doesn't exceed guest maximum */
if (kvm_vcpu_is_illegal_gpa(vcpu, gpa)) {
exception->error_code |= PFERR_RSVD_MASK;
return UNMAPPED_GVA;
}
return gpa;
}
@@ -2852,6 +2846,7 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
kvm_pfn_t pfn, int max_level)
{
struct kvm_lpage_info *linfo;
int host_level;
max_level = min(max_level, max_huge_page_level);
for ( ; max_level > PG_LEVEL_4K; max_level--) {
@@ -2863,7 +2858,8 @@ int kvm_mmu_max_mapping_level(struct kvm *kvm,
if (max_level == PG_LEVEL_4K)
return PG_LEVEL_4K;
return host_pfn_mapping_level(kvm, gfn, pfn, slot);
host_level = host_pfn_mapping_level(kvm, gfn, pfn, slot);
return min(host_level, max_level);
}
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
@@ -2887,17 +2883,12 @@ int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
if (!slot)
return PG_LEVEL_4K;
level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
if (level == PG_LEVEL_4K)
return level;
*req_level = level = min(level, max_level);
/*
* Enforce the iTLB multihit workaround after capturing the requested
* level, which will be used to do precise, accurate accounting.
*/
if (huge_page_disallowed)
*req_level = level = kvm_mmu_max_mapping_level(vcpu->kvm, slot, gfn, pfn, max_level);
if (level == PG_LEVEL_4K || huge_page_disallowed)
return PG_LEVEL_4K;
/*

View File

@@ -412,6 +412,7 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
bool was_leaf = was_present && is_last_spte(old_spte, level);
bool is_leaf = is_present && is_last_spte(new_spte, level);
bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
bool was_large, is_large;
WARN_ON(level > PT64_ROOT_MAX_LEVEL);
WARN_ON(level < PG_LEVEL_4K);
@@ -445,13 +446,6 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
if (is_large_pte(old_spte) != is_large_pte(new_spte)) {
if (is_large_pte(old_spte))
atomic64_sub(1, (atomic64_t*)&kvm->stat.lpages);
else
atomic64_add(1, (atomic64_t*)&kvm->stat.lpages);
}
/*
* The only times a SPTE should be changed from a non-present to
* non-present state is when an MMIO entry is installed/modified/
@@ -477,6 +471,18 @@ static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
return;
}
/*
* Update large page stats if a large page is being zapped, created, or
* is replacing an existing shadow page.
*/
was_large = was_leaf && is_large_pte(old_spte);
is_large = is_leaf && is_large_pte(new_spte);
if (was_large != is_large) {
if (was_large)
atomic64_sub(1, (atomic64_t *)&kvm->stat.lpages);
else
atomic64_add(1, (atomic64_t *)&kvm->stat.lpages);
}
if (was_leaf && is_dirty_spte(old_spte) &&
(!is_present || !is_dirty_spte(new_spte) || pfn_changed))

View File

@@ -2223,12 +2223,11 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
~PIN_BASED_VMX_PREEMPTION_TIMER);
/* Posted interrupts setting is only taken from vmcs12. */
if (nested_cpu_has_posted_intr(vmcs12)) {
vmx->nested.pi_pending = false;
if (nested_cpu_has_posted_intr(vmcs12))
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
} else {
else
exec_control &= ~PIN_BASED_POSTED_INTR;
}
pin_controls_set(vmx, exec_control);
/*

View File

@@ -6368,6 +6368,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->emulation_required)
return;
if (vmx->exit_reason.basic == EXIT_REASON_EXTERNAL_INTERRUPT)
handle_external_interrupt_irqoff(vcpu);
else if (vmx->exit_reason.basic == EXIT_REASON_EXCEPTION_NMI)

View File

@@ -3316,6 +3316,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!msr_info->host_initiated) {
s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
adjust_tsc_offset_guest(vcpu, adj);
/* Before returning to the guest, tsc_timestamp must be adjusted
* as well; otherwise the guest's percpu pvclock time could jump.
*/
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}

View File

@@ -30,7 +30,7 @@ config XTENSA
select HAVE_DMA_CONTIGUOUS
select HAVE_EXIT_THREAD
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if !MMU
select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PCI

View File

@@ -2361,6 +2361,9 @@ static int bfq_request_merge(struct request_queue *q, struct request **req,
__rq = bfq_find_rq_fmerge(bfqd, bio, q);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}

View File

@@ -979,6 +979,14 @@ static int bio_iov_bvec_set_append(struct bio *bio, struct iov_iter *iter)
return 0;
}
static void bio_put_pages(struct page **pages, size_t size, size_t off)
{
size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
for (i = 0; i < nr; i++)
put_page(pages[i]);
}
#define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *))
/**
@@ -1023,8 +1031,10 @@ static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
if (same_page)
put_page(page);
} else {
if (WARN_ON_ONCE(bio_full(bio, len)))
return -EINVAL;
if (WARN_ON_ONCE(bio_full(bio, len))) {
bio_put_pages(pages + i, left, offset);
return -EINVAL;
}
__bio_add_page(bio, page, len, offset);
}
offset = 0;
@@ -1069,6 +1079,7 @@ static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter)
len = min_t(size_t, PAGE_SIZE - offset, left);
if (bio_add_hw_page(q, bio, page, len, offset,
max_append_sectors, &same_page) != len) {
bio_put_pages(pages + i, left, offset);
ret = -EINVAL;
break;
}

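For context on the page count used by bio_put_pages() above, a minimal userspace sketch (illustrative only; PAGE_SIZE, PAGE_MASK and DIV_ROUND_UP are redefined locally and a 4 KiB page size is assumed) shows that DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE) is simply the number of pages spanned by size bytes starting at the in-page offset of off:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* number of pages covered by `size` bytes starting at the in-page offset of `off` */
static size_t pages_spanned(size_t size, size_t off)
{
	return DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE);
}

int main(void)
{
	assert(pages_spanned(4096, 0) == 1);	/* exactly one page */
	assert(pages_spanned(1, 4095) == 1);	/* last byte of a page */
	assert(pages_spanned(2, 4095) == 2);	/* spills into the next page */
	assert(pages_spanned(200, 4000) == 2);	/* crosses one page boundary */
	return 0;
}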
View File

@@ -332,7 +332,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
if (mode->keysize == 0)
return -EINVAL;
if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
if (dun_bytes == 0 || dun_bytes > mode->ivsize)
return -EINVAL;
if (!is_power_of_2(data_unit_size))

View File

@@ -348,6 +348,8 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
trace_block_split(split, (*bio)->bi_iter.bi_sector);
submit_bio_noacct(*bio);
*bio = split;
blk_throtl_charge_bio_split(*bio);
}
}
@@ -705,22 +707,6 @@ static void blk_account_io_merge_request(struct request *req)
}
}
/*
* Two cases of handling DISCARD merge:
* If max_discard_segments > 1, the driver takes every bio
* as a range and send them to controller together. The ranges
* needn't to be contiguous.
* Otherwise, the bios/requests will be handled as same as
* others which should be contiguous.
*/
static inline bool blk_discard_mergable(struct request *req)
{
if (req_op(req) == REQ_OP_DISCARD &&
queue_max_discard_segments(req->q) > 1)
return true;
return false;
}
static enum elv_merge blk_try_req_merge(struct request *req,
struct request *next)
{

View File

@@ -178,6 +178,9 @@ struct throtl_grp {
unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
unsigned long bio_cnt_reset_time;
atomic_t io_split_cnt[2];
atomic_t last_io_split_cnt[2];
struct blkg_rwstat stat_bytes;
struct blkg_rwstat stat_ios;
};
@@ -777,6 +780,8 @@ static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
tg->bytes_disp[rw] = 0;
tg->io_disp[rw] = 0;
atomic_set(&tg->io_split_cnt[rw], 0);
/*
* Previous slice has expired. We must have trimmed it after last
* bio dispatch. That means since start of last slice, we never used
@@ -799,6 +804,9 @@ static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
tg->io_disp[rw] = 0;
tg->slice_start[rw] = jiffies;
tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
atomic_set(&tg->io_split_cnt[rw], 0);
throtl_log(&tg->service_queue,
"[%c] new slice start=%lu end=%lu jiffies=%lu",
rw == READ ? 'R' : 'W', tg->slice_start[rw],
@@ -1031,6 +1039,9 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
jiffies + tg->td->throtl_slice);
}
if (iops_limit != UINT_MAX)
tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
if (wait)
@@ -2052,12 +2063,14 @@ static void throtl_downgrade_check(struct throtl_grp *tg)
}
if (tg->iops[READ][LIMIT_LOW]) {
tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
iops = tg->last_io_disp[READ] * HZ / elapsed_time;
if (iops >= tg->iops[READ][LIMIT_LOW])
tg->last_low_overflow_time[READ] = now;
}
if (tg->iops[WRITE][LIMIT_LOW]) {
tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
if (iops >= tg->iops[WRITE][LIMIT_LOW])
tg->last_low_overflow_time[WRITE] = now;
@@ -2176,6 +2189,25 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
}
#endif
void blk_throtl_charge_bio_split(struct bio *bio)
{
struct blkcg_gq *blkg = bio->bi_blkg;
struct throtl_grp *parent = blkg_to_tg(blkg);
struct throtl_service_queue *parent_sq;
bool rw = bio_data_dir(bio);
do {
if (!parent->has_rules[rw])
break;
atomic_inc(&parent->io_split_cnt[rw]);
atomic_inc(&parent->last_io_split_cnt[rw]);
parent_sq = parent->service_queue.parent_sq;
parent = sq_to_tg(parent_sq);
} while (parent);
}
bool blk_throtl_bio(struct bio *bio)
{
struct request_queue *q = bio->bi_bdev->bd_disk->queue;

View File

@@ -289,11 +289,13 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
extern void blk_throtl_register_queue(struct request_queue *q);
extern void blk_throtl_charge_bio_split(struct bio *bio);
bool blk_throtl_bio(struct bio *bio);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
static inline void blk_throtl_register_queue(struct request_queue *q) { }
static inline void blk_throtl_charge_bio_split(struct bio *bio) { }
static inline bool blk_throtl_bio(struct bio *bio) { return false; }
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW

View File

@@ -336,6 +336,9 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
if (__rq && elv_bio_merge_ok(__rq, bio)) {
*req = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_BACK_MERGE;
}

View File

@@ -629,6 +629,8 @@ static int dd_request_merge(struct request_queue *q, struct request **rq,
if (elv_bio_merge_ok(__rq, bio)) {
*rq = __rq;
if (blk_discard_mergable(__rq))
return ELEVATOR_DISCARD_MERGE;
return ELEVATOR_FRONT_MERGE;
}
}

View File

@@ -57,11 +57,19 @@ endif
redirect_openssl = 2>&1
quiet_redirect_openssl = 2>&1
silent_redirect_openssl = 2>/dev/null
openssl_available = $(shell openssl help 2>/dev/null && echo yes)
# We do it this way rather than having a boolean option for enabling an
# external private key, because 'make randconfig' might enable such a
# boolean option and we unfortunately can't make it depend on !RANDCONFIG.
ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
ifeq ($(openssl_available),yes)
X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
endif
$(obj)/signing_key.pem: $(obj)/x509.genkey
@$(kecho) "###"
@$(kecho) "### Now generating an X.509 key pair to be used for signing modules."

View File

@@ -27,6 +27,7 @@
#define _CRYPTO_ECC_H
#include <crypto/ecc_curve.h>
#include <asm/unaligned.h>
/* One digit is u64 qword. */
#define ECC_CURVE_NIST_P192_DIGITS 3
@@ -46,13 +47,13 @@
* @out: Output array
* @ndigits: Number of digits to copy
*/
static inline void ecc_swap_digits(const u64 *in, u64 *out, unsigned int ndigits)
static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigits)
{
const __be64 *src = (__force __be64 *)in;
int i;
for (i = 0; i < ndigits; i++)
out[i] = be64_to_cpu(src[ndigits - 1 - i]);
out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
}
/**

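A minimal userspace analogue of the ecc_swap_digits() change above (a sketch only, not the kernel code; swap_digits, load_be64 and the test values are made up for illustration): read big-endian 64-bit words from a possibly unaligned byte buffer and store them as native-endian digits, least-significant digit first, which is what get_unaligned_be64() provides in the kernel version:

#include <stdint.h>
#include <stdio.h>

/* Semantically what get_unaligned_be64() provides: a big-endian 64-bit load
 * with no alignment requirement on the source pointer. */
static uint64_t load_be64(const unsigned char *p)
{
	uint64_t v = 0;
	unsigned int j;

	for (j = 0; j < 8; j++)
		v = (v << 8) | p[j];
	return v;
}

static void swap_digits(const void *in, uint64_t *out, unsigned int ndigits)
{
	const unsigned char *src = in;
	unsigned int i;

	for (i = 0; i < ndigits; i++)
		out[i] = load_be64(src + 8 * (ndigits - 1 - i));
}

int main(void)
{
	unsigned char buf[17] = { 0 };
	uint64_t digits[2];

	buf[8] = 0x2a;				/* big-endian 42 in the most significant word */
	swap_digits(buf + 1, digits, 2);	/* deliberately unaligned input pointer */
	printf("lsd=%llx msd=%llx\n",
	       (unsigned long long)digits[0], (unsigned long long)digits[1]);
	return 0;
}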
View File

@@ -290,6 +290,11 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
}
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
ret);
goto out_free_tfm;
}
for (i = 0; i < num_mb; ++i)
if (testmgr_alloc_buf(data[i].xbuf)) {
@@ -315,7 +320,7 @@ static void test_mb_aead_speed(const char *algo, int enc, int secs,
for (i = 0; i < num_mb; ++i) {
data[i].req = aead_request_alloc(tfm, GFP_KERNEL);
if (!data[i].req) {
pr_err("alg: skcipher: Failed to allocate request for %s\n",
pr_err("alg: aead: Failed to allocate request for %s\n",
algo);
while (i--)
aead_request_free(data[i].req);
@@ -567,13 +572,19 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
sgout = &sg[9];
tfm = crypto_alloc_aead(algo, 0, 0);
if (IS_ERR(tfm)) {
pr_err("alg: aead: Failed to load transform for %s: %ld\n", algo,
PTR_ERR(tfm));
goto out_notfm;
}
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
pr_err("alg: aead: Failed to setauthsize for %s: %d\n", algo,
ret);
goto out_noreq;
}
crypto_init_wait(&wait);
printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
get_driver_name(crypto_aead, tfm), e);
@@ -611,8 +622,13 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
break;
}
}
ret = crypto_aead_setkey(tfm, key, *keysize);
ret = crypto_aead_setauthsize(tfm, authsize);
if (ret) {
pr_err("setkey() failed flags=%x: %d\n",
crypto_aead_get_flags(tfm), ret);
goto out;
}
iv_len = crypto_aead_ivsize(tfm);
if (iv_len)
@@ -622,15 +638,8 @@ static void test_aead_speed(const char *algo, int enc, unsigned int secs,
printk(KERN_INFO "test %u (%d bit key, %d byte blocks): ",
i, *keysize * 8, bs);
memset(tvmem[0], 0xff, PAGE_SIZE);
if (ret) {
pr_err("setkey() failed flags=%x\n",
crypto_aead_get_flags(tfm));
goto out;
}
sg_init_aead(sg, xbuf, bs + (enc ? 0 : authsize),
assoc, aad_size);

View File

@@ -288,10 +288,18 @@ invalid_guid:
void __init init_prmt(void)
{
struct acpi_table_header *tbl;
acpi_status status;
int mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
int mc;
status = acpi_get_table(ACPI_SIG_PRMT, 0, &tbl);
if (ACPI_FAILURE(status))
return;
mc = acpi_table_parse_entries(ACPI_SIG_PRMT, sizeof(struct acpi_table_prmt) +
sizeof (struct acpi_table_prmt_header),
0, acpi_parse_prmt, 0);
acpi_put_table(tbl);
/*
* Return immediately if PRMT table is not present or no PRM module found.
*/

View File

@@ -5573,7 +5573,7 @@ int ata_host_start(struct ata_host *host)
have_stop = 1;
}
if (host->ops->host_stop)
if (host->ops && host->ops->host_stop)
have_stop = 1;
if (have_stop) {

View File

@@ -323,8 +323,8 @@ static int hd44780_remove(struct platform_device *pdev)
{
struct charlcd *lcd = platform_get_drvdata(pdev);
kfree(lcd->drvdata);
charlcd_unregister(lcd);
kfree(lcd->drvdata);
kfree(lcd);
return 0;

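The hd44780 fix above only reorders two calls: the driver data must stay allocated until charlcd_unregister() has finished, otherwise the still-registered LCD can touch data that was already freed. A small userspace illustration of the same ordering rule (hypothetical names, not the charlcd API):

#include <stdio.h>
#include <stdlib.h>

struct lcd { char *drvdata; };

/* While "registered", other code may still dereference lcd->drvdata,
 * so unregistration must complete before the data is freed. */
static void lcd_unregister(struct lcd *lcd)
{
    printf("flushing last access to %p\n", (void *)lcd->drvdata);
}

static void lcd_remove(struct lcd *lcd)
{
    lcd_unregister(lcd);   /* 1. stop all users of drvdata      */
    free(lcd->drvdata);    /* 2. only now is it safe to free it */
    free(lcd);
}

int main(void)
{
    struct lcd *lcd = malloc(sizeof(*lcd));
    lcd->drvdata = malloc(16);
    lcd_remove(lcd);
    return 0;
}
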
View File

@@ -580,7 +580,8 @@ re_probe:
goto probe_failed;
}
if (driver_sysfs_add(dev)) {
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
goto probe_failed;
@@ -602,15 +603,18 @@ re_probe:
goto probe_failed;
}
if (device_add_groups(dev, drv->dev_groups)) {
ret = device_add_groups(dev, drv->dev_groups);
if (ret) {
dev_err(dev, "device_add_groups() failed\n");
goto dev_groups_failed;
}
if (dev_has_sync_state(dev) &&
device_create_file(dev, &dev_attr_state_synced)) {
dev_err(dev, "state_synced sysfs add failed\n");
goto dev_sysfs_state_synced_failed;
if (dev_has_sync_state(dev)) {
ret = device_create_file(dev, &dev_attr_state_synced);
if (ret) {
dev_err(dev, "state_synced sysfs add failed\n");
goto dev_sysfs_state_synced_failed;
}
}
if (test_remove) {

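The really_probe() changes above swap "if (call()) goto fail" for "ret = call(); if (ret) goto fail", so the error that reaches the caller is the one the failing helper actually returned rather than whatever happened to be left in ret. A minimal sketch of the pattern with invented helper names:

#include <errno.h>
#include <stdio.h>

static int add_sysfs_entry(void)  { return -ENOMEM; }  /* pretend this fails */
static int add_groups(void)       { return 0; }

static int probe(void)
{
    int ret;

    ret = add_sysfs_entry();      /* capture the callee's error code ... */
    if (ret)
        goto fail;

    ret = add_groups();
    if (ret)
        goto fail;

    return 0;
fail:
    return ret;                   /* ... and propagate exactly that code */
}

int main(void)
{
    printf("probe() = %d\n", probe());   /* prints -12 (ENOMEM) here */
    return 0;
}
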
View File

@@ -165,7 +165,7 @@ static inline int fw_state_wait(struct fw_priv *fw_priv)
return __fw_state_wait_common(fw_priv, MAX_SCHEDULE_TIMEOUT);
}
static int fw_cache_piggyback_on_request(const char *name);
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv);
static struct fw_priv *__allocate_fw_priv(const char *fw_name,
struct firmware_cache *fwc,
@@ -707,10 +707,8 @@ int assign_fw(struct firmware *fw, struct device *device)
* on request firmware.
*/
if (!(fw_priv->opt_flags & FW_OPT_NOCACHE) &&
fw_priv->fwc->state == FW_LOADER_START_CACHE) {
if (fw_cache_piggyback_on_request(fw_priv->fw_name))
kref_get(&fw_priv->ref);
}
fw_priv->fwc->state == FW_LOADER_START_CACHE)
fw_cache_piggyback_on_request(fw_priv);
/* pass the pages buffer to driver at the last minute */
fw_set_page_data(fw_priv, fw);
@@ -1259,11 +1257,11 @@ static int __fw_entry_found(const char *name)
return 0;
}
static int fw_cache_piggyback_on_request(const char *name)
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
struct firmware_cache *fwc = &fw_cache;
const char *name = fw_priv->fw_name;
struct firmware_cache *fwc = fw_priv->fwc;
struct fw_cache_entry *fce;
int ret = 0;
spin_lock(&fwc->name_lock);
if (__fw_entry_found(name))
@@ -1271,13 +1269,12 @@ static int fw_cache_piggyback_on_request(const char *name)
fce = alloc_fw_cache_entry(name);
if (fce) {
ret = 1;
list_add(&fce->list, &fwc->fw_names);
kref_get(&fw_priv->ref);
pr_debug("%s: fw: %s\n", __func__, name);
}
found:
spin_unlock(&fwc->name_lock);
return ret;
}
static void free_fw_cache_entry(struct fw_cache_entry *fce)
@@ -1508,9 +1505,8 @@ static inline void unregister_fw_pm_ops(void)
unregister_pm_notifier(&fw_cache.pm_notify);
}
#else
static int fw_cache_piggyback_on_request(const char *name)
static void fw_cache_piggyback_on_request(struct fw_priv *fw_priv)
{
return 0;
}
static inline int register_fw_pm_ops(void)
{

View File

@@ -1667,7 +1667,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
if (ret) {
dev_err(map->dev,
"Error in caching of register: %x ret: %d\n",
reg + i, ret);
reg + regmap_get_offset(map, i), ret);
return ret;
}
}

View File

@@ -236,6 +236,7 @@ EXPORT_SYMBOL(bcma_core_irq);
void bcma_prepare_core(struct bcma_bus *bus, struct bcma_device *core)
{
device_initialize(&core->dev);
core->dev.release = bcma_release_core_dev;
core->dev.bus = &bcma_bus_type;
dev_set_name(&core->dev, "bcma%d:%d", bus->num, core->core_index);
@@ -277,11 +278,10 @@ static void bcma_register_core(struct bcma_bus *bus, struct bcma_device *core)
{
int err;
err = device_register(&core->dev);
err = device_add(&core->dev);
if (err) {
bcma_err(bus, "Could not register dev for core 0x%03X\n",
core->id.id);
put_device(&core->dev);
return;
}
core->dev_registered = true;
@@ -372,7 +372,7 @@ void bcma_unregister_cores(struct bcma_bus *bus)
/* Now no one uses internally-handled cores, we can free them */
list_for_each_entry_safe(core, tmp, &bus->cores, list) {
list_del(&core->list);
kfree(core);
put_device(&core->dev);
}
}

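The bcma change follows the usual two-step device-model rule: once an object has been initialised with a release callback, it must be dropped with a reference put (which invokes the release) rather than freed directly, both on the device_add() error path and on final teardown. A toy refcounted object showing why, in plain C with hypothetical names rather than the driver-core API:

#include <stdio.h>
#include <stdlib.h>

struct obj {
    int refs;
    void (*release)(struct obj *);
};

static void obj_release(struct obj *o)
{
    printf("release: freeing object\n");
    free(o);
}

/* Analogue of device_initialize(): take the first reference, set release. */
static void obj_init(struct obj *o)
{
    o->refs = 1;
    o->release = obj_release;
}

/* Analogue of put_device(): drop a reference, release on the last one. */
static void obj_put(struct obj *o)
{
    if (--o->refs == 0)
        o->release(o);
}

static int obj_add(struct obj *o) { (void)o; return -1; /* pretend add fails */ }

int main(void)
{
    struct obj *o = malloc(sizeof(*o));

    obj_init(o);
    if (obj_add(o)) {
        /* On failure: put, don't free - the release callback owns freeing. */
        obj_put(o);
        return 1;
    }
    obj_put(o);
    return 0;
}
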
View File

@@ -4029,23 +4029,23 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
if (fdc_state[FDC(drive)].rawcmd == 1)
fdc_state[FDC(drive)].rawcmd = 2;
if (mode & (FMODE_READ|FMODE_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags);
if (bdev_check_media_change(bdev))
floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
if (!(mode & FMODE_NDELAY)) {
if (mode & (FMODE_READ|FMODE_WRITE)) {
drive_state[drive].last_checked = 0;
clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
&drive_state[drive].flags);
if (bdev_check_media_change(bdev))
floppy_revalidate(bdev->bd_disk);
if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
goto out;
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
}
res = -EROFS;
if ((mode & FMODE_WRITE) &&
!test_bit(FD_DISK_WRITABLE_BIT, &drive_state[drive].flags))
goto out;
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;

View File

@@ -49,6 +49,7 @@
static DEFINE_IDR(nbd_index_idr);
static DEFINE_MUTEX(nbd_index_mutex);
static struct workqueue_struct *nbd_del_wq;
static int nbd_total_devices = 0;
struct nbd_sock {
@@ -113,6 +114,7 @@ struct nbd_device {
struct mutex config_lock;
struct gendisk *disk;
struct workqueue_struct *recv_workq;
struct work_struct remove_work;
struct list_head list;
struct task_struct *task_recv;
@@ -233,7 +235,7 @@ static const struct device_attribute backend_attr = {
.show = backend_show,
};
static void nbd_dev_remove(struct nbd_device *nbd)
static void nbd_del_disk(struct nbd_device *nbd)
{
struct gendisk *disk = nbd->disk;
@@ -242,16 +244,53 @@ static void nbd_dev_remove(struct nbd_device *nbd)
blk_cleanup_disk(disk);
blk_mq_free_tag_set(&nbd->tag_set);
}
}
/*
* Place this in the last just before the nbd is freed to
* make sure that the disk and the related kobject are also
* totally removed to avoid duplicate creation of the same
* one.
*/
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) && nbd->destroy_complete)
/*
* Place this in the last just before the nbd is freed to
* make sure that the disk and the related kobject are also
* totally removed to avoid duplicate creation of the same
* one.
*/
static void nbd_notify_destroy_completion(struct nbd_device *nbd)
{
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
nbd->destroy_complete)
complete(nbd->destroy_complete);
}
static void nbd_dev_remove_work(struct work_struct *work)
{
struct nbd_device *nbd =
container_of(work, struct nbd_device, remove_work);
nbd_del_disk(nbd);
mutex_lock(&nbd_index_mutex);
/*
* Remove from idr after del_gendisk() completes,
* so if the same id is reused, the following
* add_disk() will succeed.
*/
idr_remove(&nbd_index_idr, nbd->index);
nbd_notify_destroy_completion(nbd);
mutex_unlock(&nbd_index_mutex);
kfree(nbd);
}
static void nbd_dev_remove(struct nbd_device *nbd)
{
/* Call del_gendisk() asynchronously to prevent deadlock */
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags)) {
queue_work(nbd_del_wq, &nbd->remove_work);
return;
}
nbd_del_disk(nbd);
idr_remove(&nbd_index_idr, nbd->index);
nbd_notify_destroy_completion(nbd);
kfree(nbd);
}
@@ -259,7 +298,6 @@ static void nbd_put(struct nbd_device *nbd)
{
if (refcount_dec_and_mutex_lock(&nbd->refs,
&nbd_index_mutex)) {
idr_remove(&nbd_index_idr, nbd->index);
nbd_dev_remove(nbd);
mutex_unlock(&nbd_index_mutex);
}
@@ -1388,6 +1426,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
unsigned int cmd, unsigned long arg)
{
struct nbd_config *config = nbd->config;
loff_t bytesize;
switch (cmd) {
case NBD_DISCONNECT:
@@ -1402,8 +1441,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
case NBD_SET_SIZE:
return nbd_set_size(nbd, arg, config->blksize);
case NBD_SET_SIZE_BLOCKS:
return nbd_set_size(nbd, arg * config->blksize,
config->blksize);
if (check_mul_overflow((loff_t)arg, config->blksize, &bytesize))
return -EINVAL;
return nbd_set_size(nbd, bytesize, config->blksize);
case NBD_SET_TIMEOUT:
nbd_set_cmd_timeout(nbd, arg);
return 0;
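
NBD_SET_SIZE_BLOCKS now rejects block counts whose product with the block size would overflow before the value ever reaches nbd_set_size(). The same guard can be written in userspace with the GCC/Clang __builtin_mul_overflow() that the kernel's check_mul_overflow() wraps (a sketch with made-up sizes):

#include <stdint.h>
#include <stdio.h>

/* Multiply blocks * blksize, refusing results that do not fit in int64_t. */
static int checked_size(int64_t blocks, int64_t blksize, int64_t *bytes)
{
    if (__builtin_mul_overflow(blocks, blksize, bytes))
        return -1;                          /* would overflow: reject */
    return 0;
}

int main(void)
{
    int64_t bytes;

    if (checked_size(1 << 20, 4096, &bytes) == 0)
        printf("ok: %lld bytes\n", (long long)bytes);

    if (checked_size(INT64_MAX / 2, 4096, &bytes) != 0)
        printf("rejected: multiplication would overflow\n");
    return 0;
}
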
@@ -1683,6 +1723,7 @@ static int nbd_dev_add(int index)
nbd->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_BLOCKING;
nbd->tag_set.driver_data = nbd;
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
nbd->destroy_complete = NULL;
nbd->backend = NULL;
@@ -1729,7 +1770,17 @@ static int nbd_dev_add(int index)
refcount_set(&nbd->refs, 1);
INIT_LIST_HEAD(&nbd->list);
disk->major = NBD_MAJOR;
/* Too big first_minor can cause duplicate creation of
* sysfs files/links, since first_minor will be truncated to
* byte in __device_add_disk().
*/
disk->first_minor = index << part_shift;
if (disk->first_minor > 0xff) {
err = -EINVAL;
goto out_free_idr;
}
disk->minors = 1 << part_shift;
disk->fops = &nbd_fops;
disk->private_data = nbd;
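
The first_minor check above guards against a value that would be truncated to its low byte further down the add-disk path, which could silently create a second disk with the same minor. The arithmetic is just index << part_shift, so a quick sanity check of which indices stay usable (the 0xff limit and the shift mirror the hunk above; the chosen part_shift is only an example):

#include <stdio.h>

int main(void)
{
    int part_shift = 5;                       /* e.g. 32 partitions per device */

    for (int index = 0; index < 16; index++) {
        int first_minor = index << part_shift;

        /* Values above 0xff would be truncated to a byte and collide. */
        printf("index %2d -> first_minor %3d %s\n", index, first_minor,
               first_minor > 0xff ? "(rejected)" : "");
    }
    return 0;
}
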
@@ -2424,7 +2475,14 @@ static int __init nbd_init(void)
if (register_blkdev(NBD_MAJOR, "nbd"))
return -EIO;
nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
if (!nbd_del_wq) {
unregister_blkdev(NBD_MAJOR, "nbd");
return -ENOMEM;
}
if (genl_register_family(&nbd_genl_family)) {
destroy_workqueue(nbd_del_wq);
unregister_blkdev(NBD_MAJOR, "nbd");
return -EINVAL;
}
@@ -2442,7 +2500,10 @@ static int nbd_exit_cb(int id, void *ptr, void *data)
struct list_head *list = (struct list_head *)data;
struct nbd_device *nbd = ptr;
list_add_tail(&nbd->list, list);
/* Skip nbd that is being removed asynchronously */
if (refcount_read(&nbd->refs))
list_add_tail(&nbd->list, list);
return 0;
}
@@ -2465,6 +2526,9 @@ static void __exit nbd_cleanup(void)
nbd_put(nbd);
}
/* Also wait for nbd_dev_remove_work() completes */
destroy_workqueue(nbd_del_wq);
idr_destroy(&nbd_index_idr);
genl_unregister_family(&nbd_genl_family);
unregister_blkdev(NBD_MAJOR, "nbd");

View File

@@ -452,6 +452,10 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
/* Bluetooth component of Realtek 8852AE device */
{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK |
@@ -525,6 +529,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
#define BTUSB_HW_RESET_ACTIVE 12
#define BTUSB_TX_WAIT_VND_EVT 13
#define BTUSB_WAKEUP_DISABLE 14
#define BTUSB_USE_ALT3_FOR_WBS 15
struct btusb_data {
struct hci_dev *hdev;
@@ -1757,16 +1762,20 @@ static void btusb_work(struct work_struct *work)
/* Bluetooth USB spec recommends alt 6 (63 bytes), but
* many adapters do not support it. Alt 1 appears to
* work for all adapters that do not have alt 6, and
* which work with WBS at all.
* which work with WBS at all. Some devices prefer
* alt 3 (HCI payload >= 60 Bytes let air packet
* data satisfy 60 bytes), requiring
* MTU >= 3 (packets) * 25 (size) - 3 (headers) = 72
* see also Core spec 5, vol 4, B 2.1.1 & Table 2.1.
*/
new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
/* Because mSBC frames do not need to be aligned to the
* SCO packet boundary. If support the Alt 3, use the
* Alt 3 for HCI payload >= 60 Bytes let air packet
* data satisfy 60 bytes.
*/
if (new_alts == 1 && btusb_find_altsetting(data, 3))
if (btusb_find_altsetting(data, 6))
new_alts = 6;
else if (btusb_find_altsetting(data, 3) &&
hdev->sco_mtu >= 72 &&
test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags))
new_alts = 3;
else
new_alts = 1;
}
if (btusb_switch_alt_setting(hdev, new_alts) < 0)
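
The rewritten selection above prefers alt 6, then alt 3 (only when the controller advertises an SCO MTU of at least 3 * 25 - 3 = 72 bytes and the new BTUSB_USE_ALT3_FOR_WBS flag is set), and otherwise falls back to alt 1. A compact restatement of that priority as standalone C, with hypothetical helper names; the thresholds come from the comment in the hunk:

#include <stdbool.h>
#include <stdio.h>

#define WBS_MIN_SCO_MTU (3 * 25 - 3)   /* 72: three 25-byte packets minus headers */

static int pick_alt(bool has_alt6, bool has_alt3, int sco_mtu, bool allow_alt3)
{
    if (has_alt6)
        return 6;                      /* spec-recommended 63-byte alt setting */
    if (has_alt3 && sco_mtu >= WBS_MIN_SCO_MTU && allow_alt3)
        return 3;                      /* larger HCI payload, mSBC-friendly    */
    return 1;                          /* safe default supported everywhere    */
}

int main(void)
{
    printf("alt=%d\n", pick_alt(false, true, 72, true));   /* -> 3 */
    printf("alt=%d\n", pick_alt(false, true, 60, true));   /* -> 1 */
    return 0;
}
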
@@ -1890,7 +1899,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
is_fake = true;
if (is_fake) {
bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds...");
bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds and force-suspending once...");
/* Generally these clones have big discrepancies between
* advertised features and what's actually supported.
@@ -1907,41 +1916,46 @@ static int btusb_setup_csr(struct hci_dev *hdev)
clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
/*
* Special workaround for clones with a Barrot 8041a02 chip,
* these clones are really messed-up:
* 1. Their bulk rx endpoint will never report any data unless
* the device was suspended at least once (yes really).
* Special workaround for these BT 4.0 chip clones, and potentially more:
*
* - 0x0134: a Barrot 8041a02 (HCI rev: 0x1012 sub: 0x0810)
* - 0x7558: IC markings FR3191AHAL 749H15143 (HCI rev/sub-version: 0x0709)
*
* These controllers are really messed-up.
*
* 1. Their bulk RX endpoint will never report any data unless
* the device was suspended at least once (yes, really).
* 2. They will not wakeup when autosuspended and receiving data
* on their bulk rx endpoint from e.g. a keyboard or mouse
* on their bulk RX endpoint from e.g. a keyboard or mouse
* (IOW remote-wakeup support is broken for the bulk endpoint).
*
* To fix 1. enable runtime-suspend, force-suspend the
* hci and then wake-it up by disabling runtime-suspend.
* HCI and then wake-it up by disabling runtime-suspend.
*
* To fix 2. clear the hci's can_wake flag, this way the hci
* To fix 2. clear the HCI's can_wake flag, this way the HCI
* will still be autosuspended when it is not open.
*
* --
*
* Because these are widespread problems we prefer generic solutions; so
* apply this initialization quirk to every controller that gets here,
* it should be harmless. The alternative is to not work at all.
*/
if (bcdDevice == 0x8891 &&
le16_to_cpu(rp->lmp_subver) == 0x1012 &&
le16_to_cpu(rp->hci_rev) == 0x0810 &&
le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) {
bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues");
pm_runtime_allow(&data->udev->dev);
pm_runtime_allow(&data->udev->dev);
ret = pm_runtime_suspend(&data->udev->dev);
if (ret >= 0)
msleep(200);
else
bt_dev_err(hdev, "CSR: Failed to suspend the device for our Barrot 8041a02 receive-issue workaround");
ret = pm_runtime_suspend(&data->udev->dev);
if (ret >= 0)
msleep(200);
else
bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround");
pm_runtime_forbid(&data->udev->dev);
pm_runtime_forbid(&data->udev->dev);
device_set_wakeup_capable(&data->udev->dev, false);
device_set_wakeup_capable(&data->udev->dev, false);
/* Re-enable autosuspend if this was requested */
if (enable_autosuspend)
usb_enable_autosuspend(data->udev);
}
/* Re-enable autosuspend if this was requested */
if (enable_autosuspend)
usb_enable_autosuspend(data->udev);
}
kfree_skb(skb);
@@ -2907,10 +2921,11 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
/* Read the Intel supported features and if new exception formats
* supported, need to load the additional DDC config to enable.
*/
btintel_read_debug_features(hdev, &features);
/* Set DDC mask for available debug features */
btintel_set_debug_features(hdev, &features);
err = btintel_read_debug_features(hdev, &features);
if (!err) {
/* Set DDC mask for available debug features */
btintel_set_debug_features(hdev, &features);
}
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &ver);
@@ -3003,10 +3018,11 @@ static int btusb_setup_intel_newgen(struct hci_dev *hdev)
/* Read the Intel supported features and if new exception formats
* supported, need to load the additional DDC config to enable.
*/
btintel_read_debug_features(hdev, &features);
/* Set DDC mask for available debug features */
btintel_set_debug_features(hdev, &features);
err = btintel_read_debug_features(hdev, &features);
if (!err) {
/* Set DDC mask for available debug features */
btintel_set_debug_features(hdev, &features);
}
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &version);
@@ -4742,6 +4758,7 @@ static int btusb_probe(struct usb_interface *intf,
* (DEVICE_REMOTE_WAKEUP)
*/
set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags);
}
if (!reset)

View File

@@ -89,7 +89,6 @@ config TCG_TIS_SYNQUACER
config TCG_TIS_I2C_CR50
tristate "TPM Interface Specification 2.0 Interface (I2C - CR50)"
depends on I2C
select TCG_CR50
help
This is a driver for the Google cr50 I2C TPM interface which is a
custom microcontroller and requires a custom i2c protocol interface

View File

@@ -106,17 +106,12 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
u16 len;
int sig;
if (!ibmvtpm->rtce_buf) {
dev_err(ibmvtpm->dev, "ibmvtpm device is not ready\n");
return 0;
}
sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
if (sig)
return -EINTR;
len = ibmvtpm->res_len;
if (count < len) {
@@ -237,7 +232,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
* set the processing flag before the Hcall, since we may get the
* result (interrupt) before even being able to check rc.
*/
ibmvtpm->tpm_processing_cmd = true;
ibmvtpm->tpm_processing_cmd = 1;
again:
rc = ibmvtpm_send_crq(ibmvtpm->vdev,
@@ -255,7 +250,7 @@ again:
goto again;
}
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
ibmvtpm->tpm_processing_cmd = false;
ibmvtpm->tpm_processing_cmd = 0;
}
spin_unlock(&ibmvtpm->rtce_lock);
@@ -269,7 +264,9 @@ static void tpm_ibmvtpm_cancel(struct tpm_chip *chip)
static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
{
return 0;
struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
return ibmvtpm->tpm_processing_cmd;
}
/**
@@ -457,7 +454,7 @@ static const struct tpm_class_ops tpm_ibmvtpm = {
.send = tpm_ibmvtpm_send,
.cancel = tpm_ibmvtpm_cancel,
.status = tpm_ibmvtpm_status,
.req_complete_mask = 0,
.req_complete_mask = 1,
.req_complete_val = 0,
.req_canceled = tpm_ibmvtpm_req_canceled,
};
@@ -550,7 +547,7 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
case VTPM_TPM_COMMAND_RES:
/* len of the data in rtce buffer */
ibmvtpm->res_len = be16_to_cpu(crq->len);
ibmvtpm->tpm_processing_cmd = false;
ibmvtpm->tpm_processing_cmd = 0;
wake_up_interruptible(&ibmvtpm->wq);
return;
default:
@@ -688,8 +685,15 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
if (!strcmp(id->compat, "IBM,vtpm20")) {
if (!strcmp(id->compat, "IBM,vtpm20"))
chip->flags |= TPM_CHIP_FLAG_TPM2;
rc = tpm_get_timeouts(chip);
if (rc)
goto init_irq_cleanup;
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_get_cc_attrs_tbl(chip);
if (rc)
goto init_irq_cleanup;

View File

@@ -41,7 +41,7 @@ struct ibmvtpm_dev {
wait_queue_head_t wq;
u16 res_len;
u32 vtpm_version;
bool tpm_processing_cmd;
u8 tpm_processing_cmd;
};
#define CRQ_RES_BUF_SIZE PAGE_SIZE

View File

@@ -265,6 +265,7 @@ static const char *powersave_parents[] = {
static const struct clk_muxing_soc_desc kirkwood_mux_desc[] __initconst = {
{ "powersave", powersave_parents, ARRAY_SIZE(powersave_parents),
11, 1, 0 },
{ }
};
static struct clk *clk_muxing_get_src(

View File

@@ -579,7 +579,8 @@ static int sh_cmt_start(struct sh_cmt_channel *ch, unsigned long flag)
ch->flags |= flag;
/* setup timeout if no clockevent */
if ((flag == FLAG_CLOCKSOURCE) && (!(ch->flags & FLAG_CLOCKEVENT)))
if (ch->cmt->num_channels == 1 &&
flag == FLAG_CLOCKSOURCE && (!(ch->flags & FLAG_CLOCKEVENT)))
__sh_cmt_set_next(ch, ch->max_match_value);
out:
raw_spin_unlock_irqrestore(&ch->lock, flags);
@@ -621,20 +622,25 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs)
static u64 sh_cmt_clocksource_read(struct clocksource *cs)
{
struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
unsigned long flags;
u32 has_wrapped;
u64 value;
u32 raw;
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
raw = sh_cmt_get_counter(ch, &has_wrapped);
if (ch->cmt->num_channels == 1) {
unsigned long flags;
u64 value;
u32 raw;
if (unlikely(has_wrapped))
raw += ch->match_value + 1;
raw_spin_unlock_irqrestore(&ch->lock, flags);
raw_spin_lock_irqsave(&ch->lock, flags);
value = ch->total_cycles;
raw = sh_cmt_get_counter(ch, &has_wrapped);
return value + raw;
if (unlikely(has_wrapped))
raw += ch->match_value + 1;
raw_spin_unlock_irqrestore(&ch->lock, flags);
return value + raw;
}
return sh_cmt_get_counter(ch, &has_wrapped);
}
static int sh_cmt_clocksource_enable(struct clocksource *cs)
@@ -697,7 +703,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_channel *ch,
cs->disable = sh_cmt_clocksource_disable;
cs->suspend = sh_cmt_clocksource_suspend;
cs->resume = sh_cmt_clocksource_resume;
cs->mask = CLOCKSOURCE_MASK(sizeof(u64) * 8);
cs->mask = CLOCKSOURCE_MASK(ch->cmt->info->width);
cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
dev_info(&ch->cmt->pdev->dev, "ch%u: used as clock source\n",

View File

@@ -715,12 +715,13 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
case 1:
case 3:
quad8_preset_register_set(priv, count->id, ceiling);
break;
mutex_unlock(&priv->lock);
return len;
}
mutex_unlock(&priv->lock);
return len;
return -EINVAL;
}
static ssize_t quad8_count_preset_enable_read(struct counter_device *counter,

View File

@@ -157,11 +157,6 @@ struct sec_ctx {
struct device *dev;
};
enum sec_endian {
SEC_LE = 0,
SEC_32BE,
SEC_64BE
};
enum sec_debug_file_index {
SEC_CLEAR_ENABLE,

View File

@@ -312,31 +312,20 @@ static const struct pci_device_id sec_dev_ids[] = {
};
MODULE_DEVICE_TABLE(pci, sec_dev_ids);
static u8 sec_get_endian(struct hisi_qm *qm)
static void sec_set_endian(struct hisi_qm *qm)
{
u32 reg;
/*
* As for VF, it is a wrong way to get endian setting by
* reading a register of the engine
*/
if (qm->pdev->is_virtfn) {
dev_err_ratelimited(&qm->pdev->dev,
"cannot access a register in VF!\n");
return SEC_LE;
}
reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
/* BD little endian mode */
if (!(reg & BIT(0)))
return SEC_LE;
reg &= ~(BIT(1) | BIT(0));
if (!IS_ENABLED(CONFIG_64BIT))
reg |= BIT(1);
/* BD 32-bits big endian mode */
else if (!(reg & BIT(1)))
return SEC_32BE;
/* BD 64-bits big endian mode */
else
return SEC_64BE;
if (!IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
reg |= BIT(0);
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
}
static void sec_open_sva_prefetch(struct hisi_qm *qm)
@@ -429,9 +418,7 @@ static int sec_engine_init(struct hisi_qm *qm)
qm->io_base + SEC_BD_ERR_CHK_EN_REG3);
/* config endian */
reg = readl_relaxed(qm->io_base + SEC_CONTROL_REG);
reg |= sec_get_endian(qm);
writel_relaxed(reg, qm->io_base + SEC_CONTROL_REG);
sec_set_endian(qm);
return 0;
}
@@ -984,7 +971,8 @@ static int sec_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_alg_unregister:
hisi_qm_alg_unregister(qm, &sec_devices);
if (qm->qp_num >= ctx_q_num)
hisi_qm_alg_unregister(qm, &sec_devices);
err_qm_stop:
sec_debugfs_exit(qm);
hisi_qm_stop(qm, QM_NORMAL);

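sec_set_endian() now derives the bus-descriptor endianness purely from how the kernel was built (CONFIG_CPU_LITTLE_ENDIAN) instead of reading it back from a register that VFs cannot access. Outside the kernel the equivalent compile-time decision can be made with the predefined __BYTE_ORDER__ macro; in the sketch below the bit layout of SEC_CONTROL_REG is only mimicked, not reproduced:

#include <stdint.h>
#include <stdio.h>

/* Pretend control register: bit 0 selects big-endian descriptors. */
static uint32_t set_endian_bits(uint32_t reg)
{
    reg &= ~UINT32_C(0x3);                     /* clear both endian bits */
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    reg |= UINT32_C(0x1);                      /* big-endian host: set BE mode */
#endif
    return reg;
}

int main(void)
{
    printf("control reg = 0x%x\n", (unsigned)set_endian_bits(0xffffffffu));
    return 0;
}
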
View File

@@ -170,15 +170,19 @@ static struct dcp *global_sdcp;
static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
{
int dma_err;
struct dcp *sdcp = global_sdcp;
const int chan = actx->chan;
uint32_t stat;
unsigned long ret;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
dma_addr_t desc_phys = dma_map_single(sdcp->dev, desc, sizeof(*desc),
DMA_TO_DEVICE);
dma_err = dma_mapping_error(sdcp->dev, desc_phys);
if (dma_err)
return dma_err;
reinit_completion(&sdcp->completion[chan]);
/* Clear status register. */
@@ -216,18 +220,29 @@ static int mxs_dcp_start_dma(struct dcp_async_ctx *actx)
static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
struct skcipher_request *req, int init)
{
dma_addr_t key_phys, src_phys, dst_phys;
struct dcp *sdcp = global_sdcp;
struct dcp_dma_desc *desc = &sdcp->coh->desc[actx->chan];
struct dcp_aes_req_ctx *rctx = skcipher_request_ctx(req);
int ret;
dma_addr_t key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_addr_t src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
dma_addr_t dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
key_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_key,
2 * AES_KEYSIZE_128, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, key_phys);
if (ret)
return ret;
src_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, src_phys);
if (ret)
goto err_src;
dst_phys = dma_map_single(sdcp->dev, sdcp->coh->aes_out_buf,
DCP_BUF_SZ, DMA_FROM_DEVICE);
ret = dma_mapping_error(sdcp->dev, dst_phys);
if (ret)
goto err_dst;
if (actx->fill % AES_BLOCK_SIZE) {
dev_err(sdcp->dev, "Invalid block size!\n");
@@ -265,10 +280,12 @@ static int mxs_dcp_run_aes(struct dcp_async_ctx *actx,
ret = mxs_dcp_start_dma(actx);
aes_done_run:
dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
err_dst:
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
err_src:
dma_unmap_single(sdcp->dev, key_phys, 2 * AES_KEYSIZE_128,
DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, src_phys, DCP_BUF_SZ, DMA_TO_DEVICE);
dma_unmap_single(sdcp->dev, dst_phys, DCP_BUF_SZ, DMA_FROM_DEVICE);
return ret;
}
@@ -557,6 +574,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
dma_addr_t buf_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_in_buf,
DCP_BUF_SZ, DMA_TO_DEVICE);
ret = dma_mapping_error(sdcp->dev, buf_phys);
if (ret)
return ret;
/* Fill in the DMA descriptor. */
desc->control0 = MXS_DCP_CONTROL0_DECR_SEMAPHORE |
MXS_DCP_CONTROL0_INTERRUPT |
@@ -589,6 +610,10 @@ static int mxs_dcp_run_sha(struct ahash_request *req)
if (rctx->fini) {
digest_phys = dma_map_single(sdcp->dev, sdcp->coh->sha_out_buf,
DCP_SHA_PAY_SZ, DMA_FROM_DEVICE);
ret = dma_mapping_error(sdcp->dev, digest_phys);
if (ret)
goto done_run;
desc->control0 |= MXS_DCP_CONTROL0_HASH_TERM;
desc->payload = digest_phys;
}

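mxs_dcp_run_aes() now checks every dma_map_single() result and unwinds in reverse order through labelled exits, so a failed later mapping still releases the earlier ones. The shape of that error ladder, reduced to a standalone sketch with stand-in acquire/release functions:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
    if (fail) {
        printf("mapping %s failed\n", what);
        return -1;
    }
    printf("mapped %s\n", what);
    return 0;
}

static void release(const char *what) { printf("unmapped %s\n", what); }

static int run(int fail_dst)
{
    int ret;

    ret = acquire("key", 0);
    if (ret)
        return ret;
    ret = acquire("src", 0);
    if (ret)
        goto err_src;
    ret = acquire("dst", fail_dst);
    if (ret)
        goto err_dst;

    /* ... do the work ... */
    release("dst");
err_dst:
    release("src");
err_src:
    release("key");
    return ret;
}

int main(void)
{
    run(1);   /* dst mapping fails: key and src are still unmapped cleanly */
    return 0;
}
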
View File

@@ -1175,9 +1175,9 @@ static int omap_aes_probe(struct platform_device *pdev)
spin_lock_init(&dd->lock);
INIT_LIST_HEAD(&dd->list);
spin_lock(&list_lock);
spin_lock_bh(&list_lock);
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
spin_unlock_bh(&list_lock);
/* Initialize crypto engine */
dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1264,9 +1264,9 @@ static int omap_aes_remove(struct platform_device *pdev)
if (!dd)
return -ENODEV;
spin_lock(&list_lock);
spin_lock_bh(&list_lock);
list_del(&dd->list);
spin_unlock(&list_lock);
spin_unlock_bh(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {

View File

@@ -1033,9 +1033,9 @@ static int omap_des_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dd->list);
spin_lock(&list_lock);
spin_lock_bh(&list_lock);
list_add_tail(&dd->list, &dev_list);
spin_unlock(&list_lock);
spin_unlock_bh(&list_lock);
/* Initialize des crypto engine */
dd->engine = crypto_engine_alloc_init(dev, 1);
@@ -1094,9 +1094,9 @@ static int omap_des_remove(struct platform_device *pdev)
if (!dd)
return -ENODEV;
spin_lock(&list_lock);
spin_lock_bh(&list_lock);
list_del(&dd->list);
spin_unlock(&list_lock);
spin_unlock_bh(&list_lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)

View File

@@ -1736,7 +1736,7 @@ static void omap_sham_done_task(unsigned long data)
if (test_and_clear_bit(FLAGS_OUTPUT_READY, &dd->flags))
goto finish;
} else if (test_bit(FLAGS_DMA_READY, &dd->flags)) {
if (test_and_clear_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
if (test_bit(FLAGS_DMA_ACTIVE, &dd->flags)) {
omap_sham_update_dma_stop(dd);
if (dd->err) {
err = dd->err;
@@ -2144,9 +2144,9 @@ static int omap_sham_probe(struct platform_device *pdev)
(rev & dd->pdata->major_mask) >> dd->pdata->major_shift,
(rev & dd->pdata->minor_mask) >> dd->pdata->minor_shift);
spin_lock(&sham.lock);
spin_lock_bh(&sham.lock);
list_add_tail(&dd->list, &sham.dev_list);
spin_unlock(&sham.lock);
spin_unlock_bh(&sham.lock);
dd->engine = crypto_engine_alloc_init(dev, 1);
if (!dd->engine) {
@@ -2194,9 +2194,9 @@ err_algs:
err_engine_start:
crypto_engine_exit(dd->engine);
err_engine:
spin_lock(&sham.lock);
spin_lock_bh(&sham.lock);
list_del(&dd->list);
spin_unlock(&sham.lock);
spin_unlock_bh(&sham.lock);
err_pm:
pm_runtime_disable(dev);
if (!dd->polling_mode)
@@ -2215,9 +2215,9 @@ static int omap_sham_remove(struct platform_device *pdev)
dd = platform_get_drvdata(pdev);
if (!dd)
return -ENODEV;
spin_lock(&sham.lock);
spin_lock_bh(&sham.lock);
list_del(&dd->list);
spin_unlock(&sham.lock);
spin_unlock_bh(&sham.lock);
for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--) {
crypto_unregister_ahash(

View File

@@ -81,10 +81,10 @@ void adf_init_hw_data_c3xxxiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;

View File

@@ -81,10 +81,10 @@ void adf_init_hw_data_c62xiov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;

View File

@@ -198,8 +198,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
int adf_vf2pf_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev);
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
void adf_exit_pf_wq(void);
int adf_init_vf_wq(void);
@@ -222,12 +222,12 @@ static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
{
}
static inline int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
static inline int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
return 0;
}
static inline void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
static inline void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
}

View File

@@ -61,6 +61,7 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
int ret;
if (!hw_data) {
dev_err(&GET_DEV(accel_dev),
@@ -127,9 +128,9 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
}
hw_data->enable_error_correction(accel_dev);
hw_data->enable_vf2pf_comms(accel_dev);
ret = hw_data->enable_vf2pf_comms(accel_dev);
return 0;
return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_init);

View File

@@ -15,6 +15,8 @@
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
@@ -72,7 +74,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
u32 vf_mask;
unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs */
vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
@@ -93,8 +95,7 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
* unless the VF is malicious and is attempting to
* flood the host OS with VF2PF interrupts.
*/
for_each_set_bit(i, (const unsigned long *)&vf_mask,
(sizeof(vf_mask) * BITS_PER_BYTE)) {
for_each_set_bit(i, &vf_mask, ADF_MAX_NUM_VFS) {
vf_info = accel_dev->pf.vf_info + i;
if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {

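The isr change makes vf_mask an unsigned long so the bit iterator works on a properly sized object instead of casting a u32 and scanning past its width. A trivial userspace equivalent of walking the set bits of a bounded mask (a plain loop; the kernel's for_each_set_bit() is not used here):

#include <stdio.h>

#define MAX_NUM_VFS 32

int main(void)
{
    unsigned long vf_mask = 0x8041;          /* example: VFs 0, 6 and 15 raised an IRQ */

    for (int i = 0; i < MAX_NUM_VFS; i++) {
        if (vf_mask & (1UL << i))
            printf("handle interrupt from VF %d\n", i);
    }
    return 0;
}
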
View File

@@ -186,7 +186,6 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
return ret;
}
EXPORT_SYMBOL_GPL(adf_iov_putmsg);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
@@ -316,6 +315,8 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
msg |= ADF_PFVF_COMPATIBILITY_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
BUILD_BUG_ON(ADF_PFVF_COMPATIBILITY_VERSION > 255);
reinit_completion(&accel_dev->vf.iov_msg_completion);
/* Send request from VF to PF */
ret = adf_iov_putmsg(accel_dev, msg, 0);
if (ret) {

View File

@@ -5,14 +5,14 @@
#include "adf_pf2vf_msg.h"
/**
* adf_vf2pf_init() - send init msg to PF
* adf_vf2pf_notify_init() - send init msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends an init message from the VF to a PF
*
* Return: 0 on success, error code otherwise.
*/
int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -25,17 +25,17 @@ int adf_vf2pf_init(struct adf_accel_dev *accel_dev)
set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
return 0;
}
EXPORT_SYMBOL_GPL(adf_vf2pf_init);
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_init);
/**
* adf_vf2pf_shutdown() - send shutdown msg to PF
* adf_vf2pf_notify_shutdown() - send shutdown msg to PF
* @accel_dev: Pointer to acceleration VF device.
*
* Function sends a shutdown message from the VF to a PF
*
* Return: void
*/
void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
{
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
@@ -45,4 +45,4 @@ void adf_vf2pf_shutdown(struct adf_accel_dev *accel_dev)
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
EXPORT_SYMBOL_GPL(adf_vf2pf_shutdown);
EXPORT_SYMBOL_GPL(adf_vf2pf_notify_shutdown);

View File

@@ -160,6 +160,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_bar_addr = pmisc->virt_addr;
bool handled = false;
u32 v_int;
/* Read VF INT source CSR to determine the source of VF interrupt */
@@ -172,7 +173,7 @@ static irqreturn_t adf_isr(int irq, void *privdata)
/* Schedule tasklet to handle interrupt BH */
tasklet_hi_schedule(&accel_dev->vf.pf2vf_bh_tasklet);
return IRQ_HANDLED;
handled = true;
}
/* Check bundle interrupt */
@@ -184,10 +185,10 @@ static irqreturn_t adf_isr(int irq, void *privdata)
csr_ops->write_csr_int_flag_and_col(bank->csr_addr,
bank->bank_number, 0);
tasklet_hi_schedule(&bank->resp_handler);
return IRQ_HANDLED;
handled = true;
}
return IRQ_NONE;
return handled ? IRQ_HANDLED : IRQ_NONE;
}
static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)

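The adf_isr() rework stops returning IRQ_HANDLED after the first source it services; it records each serviced source in a bool and reports IRQ_HANDLED only if at least one fired, IRQ_NONE otherwise. The control-flow pattern in isolation, with stand-in source checks rather than the QAT register layout:

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

static bool pf2vf_pending(void)  { return true; }   /* pretend both sources fired */
static bool bundle_pending(void) { return true; }

static enum irqreturn isr(void)
{
    bool handled = false;

    if (pf2vf_pending()) {
        printf("servicing PF2VF notification\n");
        handled = true;                 /* keep going: other sources may be pending */
    }
    if (bundle_pending()) {
        printf("servicing bundle (ring) interrupt\n");
        handled = true;
    }
    return handled ? IRQ_HANDLED : IRQ_NONE;
}

int main(void)
{
    printf("isr() -> %s\n", isr() == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
    return 0;
}
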
View File

@@ -81,10 +81,10 @@ void adf_init_hw_data_dh895xcciov(struct adf_hw_device_data *hw_data)
hw_data->enable_error_correction = adf_vf_void_noop;
hw_data->init_admin_comms = adf_vf_int_noop;
hw_data->exit_admin_comms = adf_vf_void_noop;
hw_data->send_admin_init = adf_vf2pf_init;
hw_data->send_admin_init = adf_vf2pf_notify_init;
hw_data->init_arb = adf_vf_int_noop;
hw_data->exit_arb = adf_vf_void_noop;
hw_data->disable_iov = adf_vf2pf_shutdown;
hw_data->disable_iov = adf_vf2pf_notify_shutdown;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_num_accels = get_num_accels;

View File

@@ -243,6 +243,9 @@ static struct acpi_device *to_cxl_host_bridge(struct device *dev)
{
struct acpi_device *adev = to_acpi_device(dev);
if (!acpi_pci_find_root(adev->handle))
return NULL;
if (strcmp(acpi_device_hid(adev), "ACPI0016") == 0)
return adev;
return NULL;
@@ -266,10 +269,6 @@ static int add_host_bridge_uport(struct device *match, void *arg)
if (!bridge)
return 0;
pci_root = acpi_pci_find_root(bridge->handle);
if (!pci_root)
return -ENXIO;
dport = find_dport_by_dev(root_port, match);
if (!dport) {
dev_dbg(host, "host bridge expected and not found\n");
@@ -282,6 +281,11 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return PTR_ERR(port);
dev_dbg(host, "%s: add: %s\n", dev_name(match), dev_name(&port->dev));
/*
* Note that this lookup already succeeded in
* to_cxl_host_bridge(), so no need to check for failure here
*/
pci_root = acpi_pci_find_root(bridge->handle);
ctx = (struct cxl_walk_context){
.dev = host,
.root = pci_root->bus,

Some files were not shown because too many files have changed in this diff.