Merge branches 'x86/cleanups', 'x86/mpparse', 'x86/numa' and 'x86/uv' into x86/urgent
@@ -190,16 +190,23 @@
/* FIXME: move this macro to <linux/pci.h> */
#define PCI_BUS(x) (((x) >> 8) & 0xff)

/* Protection domain flags */
#define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops
                                      domain for an IOMMU */

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
        spinlock_t lock; /* mostly used to lock the page table*/
        u16 id; /* the domain id written to the device table */
        int mode; /* paging mode (0-6 levels) */
        u64 *pt_root; /* page table root pointer */
        void *priv; /* private data */
        spinlock_t lock; /* mostly used to lock the page table*/
        u16 id; /* the domain id written to the device table */
        int mode; /* paging mode (0-6 levels) */
        u64 *pt_root; /* page table root pointer */
        unsigned long flags; /* flags to find out type of domain */
        unsigned dev_cnt; /* devices assigned to this domain */
        void *priv; /* private data */
};

/*
@@ -295,7 +302,7 @@ struct amd_iommu {
        bool int_enabled;

        /* if one, we need to send a completion wait command */
        int need_sync;
        bool need_sync;

        /* default dma_ops domain for that IOMMU */
        struct dma_ops_domain *default_dom;
@@ -374,7 +381,7 @@ extern struct protection_domain **amd_iommu_pd_table;
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/* will be 1 if device isolation is enabled */
extern int amd_iommu_isolate;
extern bool amd_iommu_isolate;

/*
 * If true, the addresses will be flushed on unmap time, not when
@@ -382,18 +389,6 @@ extern int amd_iommu_isolate;
 */
extern bool amd_iommu_unmap_flush;

/* takes a PCI device id and prints it out in a readable form */
static inline void print_devid(u16 devid, int nl)
{
        int bus = devid >> 8;
        int dev = devid >> 3 & 0x1f;
        int fn = devid & 0x07;

        printk("%02x:%02x.%x", bus, dev, fn);
        if (nl)
                printk("\n");
}

/* takes bus and device/function and returns the device id
 * FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
@@ -401,4 +396,32 @@ static inline u16 calc_devid(u8 bus, u8 devfn)
        return (((u16)bus) << 8) | devfn;
}

#ifdef CONFIG_AMD_IOMMU_STATS

struct __iommu_counter {
        char *name;
        struct dentry *dent;
        u64 value;
};

#define DECLARE_STATS_COUNTER(nm) \
        static struct __iommu_counter nm = { \
                .name = #nm, \
        }

#define INC_STATS_COUNTER(name) name.value += 1
#define ADD_STATS_COUNTER(name, x) name.value += (x)
#define SUB_STATS_COUNTER(name, x) name.value -= (x)

#else /* CONFIG_AMD_IOMMU_STATS */

#define DECLARE_STATS_COUNTER(name)
#define INC_STATS_COUNTER(name)
#define ADD_STATS_COUNTER(name, x)
#define SUB_STATS_COUNTER(name, x)

static inline void amd_iommu_stats_init(void) { }

#endif /* CONFIG_AMD_IOMMU_STATS */

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
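
Note on the CONFIG_AMD_IOMMU_STATS block above: a counter is declared once with DECLARE_STATS_COUNTER() and bumped from the code paths being measured; with the option disabled, all of the macros compile away. A minimal usage sketch, not part of this diff (the counter name cnt_map_single and the helper function are illustrative only):

        /* illustrative sketch, assumes CONFIG_AMD_IOMMU_STATS=y */
        DECLARE_STATS_COUNTER(cnt_map_single);

        static void account_mapping(unsigned long pages)
        {
                INC_STATS_COUNTER(cnt_map_single);        /* cnt_map_single.value += 1 */
                ADD_STATS_COUNTER(cnt_map_single, pages); /* cnt_map_single.value += (pages) */
        }
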
@@ -157,7 +157,7 @@ cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)

        num_bits_set = cpumask_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
        if (num_bits_set == nr_cpu_ids)
                return 0xFF;
        /*
         * The cpus in the mask must all be on the apic cluster. If are not
@@ -190,7 +190,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)

        num_bits_set = cpus_weight(*cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
        if (num_bits_set == nr_cpu_ids)
                return cpu_to_logical_apicid(0);
        /*
         * The cpus in the mask must all be on the apic cluster. If are not
@@ -218,9 +218,6 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
                                                  const struct cpumask *andmask)
{
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid = cpu_to_logical_apicid(0);
        cpumask_var_t cpumask;

@@ -229,31 +226,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,

        cpumask_and(cpumask, inmask, andmask);
        cpumask_and(cpumask, cpumask, cpu_online_mask);
        apicid = cpu_mask_to_apicid(cpumask);

        num_bits_set = cpumask_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
                goto exit;
        /*
         * The cpus in the mask must all be on the apic cluster. If are not
         * on the same apicid cluster return default value of TARGET_CPUS.
         */
        cpu = cpumask_first(cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        while (cpus_found < num_bits_set) {
                if (cpumask_test_cpu(cpu, cpumask)) {
                        int new_apicid = cpu_to_logical_apicid(cpu);
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
                                return cpu_to_logical_apicid(0);
                        }
                        apicid = new_apicid;
                        cpus_found++;
                }
                cpu++;
        }
exit:
        free_cpumask_var(cpumask);
        return apicid;
}

@@ -10,8 +10,7 @@ extern void setup_unisys(void);

#ifndef CONFIG_X86_GENERICARCH
extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
                         char *productid);
extern int mps_oem_check(struct mpc_table *mpc, char *oem, char *productid);
#endif

#ifdef CONFIG_ACPI

@@ -15,9 +15,9 @@
 * Copyright 2003 Andi Kleen, SuSE Labs.
 */

struct mpc_config_bus;
struct mp_config_table;
struct mpc_config_processor;
struct mpc_bus;
struct mpc_table;
struct mpc_cpu;

struct genapic {
        char *name;
@@ -51,7 +51,7 @@ struct genapic {
        /* When one of the next two hooks returns 1 the genapic
           is switched to this. Essentially they are additional probe
           functions. */
        int (*mps_oem_check)(struct mp_config_table *mpc, char *oem,
        int (*mps_oem_check)(struct mpc_table *mpc, char *oem,
                             char *productid);
        int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);

@@ -360,7 +360,7 @@ struct kvm_arch{
        struct list_head active_mmu_pages;
        struct list_head assigned_dev_head;
        struct list_head oos_global_pages;
        struct dmar_domain *intel_iommu_domain;
        struct iommu_domain *iommu_domain;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
        struct kvm_pit *vpit;

@@ -15,7 +15,7 @@
#define SHARED_SWITCHER_PAGES \
        DIV_ROUND_UP(end_switcher_text - start_switcher_text, PAGE_SIZE)
/* Pages for switcher itself, then two pages per cpu */
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * NR_CPUS)
#define TOTAL_SWITCHER_PAGES (SHARED_SWITCHER_PAGES + 2 * nr_cpu_ids)

/* We map at -4M for ease of mapping into the guest (one PTE page). */
#define SWITCHER_ADDR 0xFFC00000

@@ -1,8 +1,8 @@
#ifndef _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H
#define _ASM_X86_MACH_DEFAULT_MACH_MPPARSE_H

static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
                                char *productid)
static inline int
mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
{
        return 0;
}

@@ -2,9 +2,8 @@
#define _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H

extern int mps_oem_check(struct mp_config_table *mpc, char *oem,
                         char *productid);
extern int mps_oem_check(struct mpc_table *, char *, char *);

extern int acpi_madt_oem_check(char *oem_id, char *oem_table_id);
extern int acpi_madt_oem_check(char *, char *);

#endif /* _ASM_X86_MACH_GENERIC_MACH_MPPARSE_H */

@@ -7,6 +7,6 @@
/* Maximum 256 PCI busses, plus 1 ISA bus in each of 4 cabinets. */
#define MAX_MP_BUSSES 260

extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
                                char *productid);
extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);

#endif /* _ASM_X86_MACH_GENERIC_MACH_MPSPEC_H */

@@ -39,17 +39,17 @@ struct intel_mp_floating {

#define MPC_SIGNATURE "PCMP"

struct mp_config_table {
        char mpc_signature[4];
        unsigned short mpc_length; /* Size of table */
        char mpc_spec; /* 0x01 */
        char mpc_checksum;
        char mpc_oem[8];
        char mpc_productid[12];
        unsigned int mpc_oemptr; /* 0 if not present */
        unsigned short mpc_oemsize; /* 0 if not present */
        unsigned short mpc_oemcount;
        unsigned int mpc_lapic; /* APIC address */
struct mpc_table {
        char signature[4];
        unsigned short length; /* Size of table */
        char spec; /* 0x01 */
        char checksum;
        char oem[8];
        char productid[12];
        unsigned int oemptr; /* 0 if not present */
        unsigned short oemsize; /* 0 if not present */
        unsigned short oemcount;
        unsigned int lapic; /* APIC address */
        unsigned int reserved;
};

@@ -70,20 +70,20 @@ struct mp_config_table {
#define CPU_MODEL_MASK 0x00F0
#define CPU_FAMILY_MASK 0x0F00

struct mpc_config_processor {
        unsigned char mpc_type;
        unsigned char mpc_apicid; /* Local APIC number */
        unsigned char mpc_apicver; /* Its versions */
        unsigned char mpc_cpuflag;
        unsigned int mpc_cpufeature;
        unsigned int mpc_featureflag; /* CPUID feature value */
        unsigned int mpc_reserved[2];
struct mpc_cpu {
        unsigned char type;
        unsigned char apicid; /* Local APIC number */
        unsigned char apicver; /* Its versions */
        unsigned char cpuflag;
        unsigned int cpufeature;
        unsigned int featureflag; /* CPUID feature value */
        unsigned int reserved[2];
};

struct mpc_config_bus {
        unsigned char mpc_type;
        unsigned char mpc_busid;
        unsigned char mpc_bustype[6];
struct mpc_bus {
        unsigned char type;
        unsigned char busid;
        unsigned char bustype[6];
};

/* List of Bus Type string values, Intel MP Spec. */
@@ -108,22 +108,22 @@ struct mpc_config_bus {

#define MPC_APIC_USABLE 0x01

struct mpc_config_ioapic {
        unsigned char mpc_type;
        unsigned char mpc_apicid;
        unsigned char mpc_apicver;
        unsigned char mpc_flags;
        unsigned int mpc_apicaddr;
struct mpc_ioapic {
        unsigned char type;
        unsigned char apicid;
        unsigned char apicver;
        unsigned char flags;
        unsigned int apicaddr;
};

struct mpc_config_intsrc {
        unsigned char mpc_type;
        unsigned char mpc_irqtype;
        unsigned short mpc_irqflag;
        unsigned char mpc_srcbus;
        unsigned char mpc_srcbusirq;
        unsigned char mpc_dstapic;
        unsigned char mpc_dstirq;
struct mpc_intsrc {
        unsigned char type;
        unsigned char irqtype;
        unsigned short irqflag;
        unsigned char srcbus;
        unsigned char srcbusirq;
        unsigned char dstapic;
        unsigned char dstirq;
};

enum mp_irq_source_types {
@@ -139,24 +139,24 @@ enum mp_irq_source_types {

#define MP_APIC_ALL 0xFF

struct mpc_config_lintsrc {
        unsigned char mpc_type;
        unsigned char mpc_irqtype;
        unsigned short mpc_irqflag;
        unsigned char mpc_srcbusid;
        unsigned char mpc_srcbusirq;
        unsigned char mpc_destapic;
        unsigned char mpc_destapiclint;
struct mpc_lintsrc {
        unsigned char type;
        unsigned char irqtype;
        unsigned short irqflag;
        unsigned char srcbusid;
        unsigned char srcbusirq;
        unsigned char destapic;
        unsigned char destapiclint;
};

#define MPC_OEM_SIGNATURE "_OEM"

struct mp_config_oemtable {
        char oem_signature[4];
        unsigned short oem_length; /* Size of table */
        char oem_rev; /* 0x01 */
        char oem_checksum;
        char mpc_oem[8];
struct mpc_oemtable {
        char signature[4];
        unsigned short length; /* Size of table */
        char rev; /* 0x01 */
        char checksum;
        char mpc[8];
};

/*
@@ -63,8 +63,8 @@ static inline physid_mask_t ioapic_phys_id_map(physid_mask_t phys_map)
extern u8 cpu_2_logical_apicid[];
static inline int cpu_to_logical_apicid(int cpu)
{
        if (cpu >= NR_CPUS)
                return BAD_APICID;
        if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return (int)cpu_2_logical_apicid[cpu];
}

@@ -1,7 +1,6 @@
#ifndef __ASM_NUMAQ_MPPARSE_H
#define __ASM_NUMAQ_MPPARSE_H

extern void numaq_mps_oem_check(struct mp_config_table *mpc, char *oem,
                                char *productid);
extern void numaq_mps_oem_check(struct mpc_table *, char *, char *);

#endif /* __ASM_NUMAQ_MPPARSE_H */

@@ -102,9 +102,9 @@ extern void pci_iommu_alloc(void);

#ifdef CONFIG_NUMA
/* Returns the node based on pci bus */
static inline int __pcibus_to_node(struct pci_bus *bus)
static inline int __pcibus_to_node(const struct pci_bus *bus)
{
        struct pci_sysdata *sd = bus->sysdata;
        const struct pci_sysdata *sd = bus->sysdata;

        return sd->node;
}
@@ -113,6 +113,12 @@ static inline cpumask_t __pcibus_to_cpumask(struct pci_bus *bus)
{
        return node_to_cpumask(__pcibus_to_node(bus));
}

static inline const struct cpumask *
cpumask_of_pcibus(const struct pci_bus *bus)
{
        return cpumask_of_node(__pcibus_to_node(bus));
}
#endif

#endif /* _ASM_X86_PCI_H */

@@ -25,9 +25,9 @@ extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
/*
 * Any setup quirks to be performed?
 */
struct mpc_config_processor;
struct mpc_config_bus;
struct mp_config_oemtable;
struct mpc_cpu;
struct mpc_bus;
struct mpc_oemtable;
struct x86_quirks {
        int (*arch_pre_time_init)(void);
        int (*arch_time_init)(void);
@@ -39,10 +39,10 @@ struct x86_quirks {
        int (*mach_find_smp_config)(unsigned int reserve);

        int *mpc_record;
        int (*mpc_apic_id)(struct mpc_config_processor *m);
        void (*mpc_oem_bus_info)(struct mpc_config_bus *m, char *name);
        void (*mpc_oem_pci_bus)(struct mpc_config_bus *m);
        void (*smp_read_mpc_oem)(struct mp_config_oemtable *oemtable,
        int (*mpc_apic_id)(struct mpc_cpu *m);
        void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
        void (*mpc_oem_pci_bus)(struct mpc_bus *m);
        void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
                                 unsigned short oemsize);
        int (*setup_ioapic_ids)(void);
        int (*update_genapic)(void);

@@ -52,7 +52,7 @@ static inline void init_apic_ldr(void)
        int i;

        /* Create logical APIC IDs by counting CPUs already in cluster. */
        for (count = 0, i = NR_CPUS; --i >= 0; ) {
        for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
                lid = cpu_2_logical_apicid[i];
                if (lid != BAD_APICID && apicid_cluster(lid) == my_cluster)
                        ++count;
@@ -97,8 +97,8 @@ static inline int apicid_to_node(int logical_apicid)
static inline int cpu_to_logical_apicid(int cpu)
{
#ifdef CONFIG_SMP
        if (cpu >= NR_CPUS)
                return BAD_APICID;
        if (cpu >= nr_cpu_ids)
                return BAD_APICID;
        return (int)cpu_2_logical_apicid[cpu];
#else
        return logical_smp_processor_id();
@@ -107,7 +107,7 @@ static inline int cpu_to_logical_apicid(int cpu)

static inline int cpu_present_to_apicid(int mps_cpu)
{
        if (mps_cpu < NR_CPUS)
        if (mps_cpu < nr_cpu_ids)
                return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
        else
                return BAD_APICID;
@@ -146,7 +146,7 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)

        num_bits_set = cpus_weight(*cpumask);
        /* Return id to all */
        if (num_bits_set == NR_CPUS)
        if (num_bits_set >= nr_cpu_ids)
                return (int) 0xFF;
        /*
         * The cpus in the mask must all be on the apic cluster. If are not
@@ -173,42 +173,16 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
                                                  const struct cpumask *andmask)
{
        int num_bits_set;
        int cpus_found = 0;
        int cpu;
        int apicid = 0xFF;
        int apicid = cpu_to_logical_apicid(0);
        cpumask_var_t cpumask;

        if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
                return (int) 0xFF;
                return apicid;

        cpumask_and(cpumask, inmask, andmask);
        cpumask_and(cpumask, cpumask, cpu_online_mask);
        apicid = cpu_mask_to_apicid(cpumask);

        num_bits_set = cpumask_weight(cpumask);
        /* Return id to all */
        if (num_bits_set == nr_cpu_ids)
                goto exit;
        /*
         * The cpus in the mask must all be on the apic cluster. If are not
         * on the same apicid cluster return default value of TARGET_CPUS.
         */
        cpu = cpumask_first(cpumask);
        apicid = cpu_to_logical_apicid(cpu);
        while (cpus_found < num_bits_set) {
                if (cpumask_test_cpu(cpu, cpumask)) {
                        int new_apicid = cpu_to_logical_apicid(cpu);
                        if (apicid_cluster(apicid) !=
                                        apicid_cluster(new_apicid)){
                                printk ("%s: Not a valid mask!\n", __func__);
                                return 0xFF;
                        }
                        apicid = apicid | new_apicid;
                        cpus_found++;
                }
                cpu++;
        }
exit:
        free_cpumask_var(cpumask);
        return apicid;
}

@@ -11,7 +11,7 @@ extern void setup_summit(void);
#define setup_summit() {}
#endif

static inline int mps_oem_check(struct mp_config_table *mpc, char *oem,
static inline int mps_oem_check(struct mpc_table *mpc, char *oem,
                                char *productid)
{
        if (!strncmp(oem, "IBM ENSW", 8) &&
@@ -61,13 +61,19 @@ static inline int cpu_to_node(int cpu)
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used. The
 * node_to_cpumask_ptr function should be used whenever possible.
 * cpumask_of_node function should be used whenever possible.
 */
static inline cpumask_t node_to_cpumask(int node)
{
        return node_to_cpumask_map[node];
}

/* Returns a bitmask of CPUs on Node 'node'. */
static inline const struct cpumask *cpumask_of_node(int node)
{
        return &node_to_cpumask_map[node];
}

#else /* CONFIG_X86_64 */

/* Mappings between node number and cpus on that node. */
@@ -82,7 +88,7 @@ DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
extern int cpu_to_node(int cpu);
extern int early_cpu_to_node(int cpu);
extern const cpumask_t *_node_to_cpumask_ptr(int node);
extern const cpumask_t *cpumask_of_node(int node);
extern cpumask_t node_to_cpumask(int node);

#else /* !CONFIG_DEBUG_PER_CPU_MAPS */

@@ -103,7 +109,7 @@ static inline int early_cpu_to_node(int cpu)
}

/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
static inline const cpumask_t *_node_to_cpumask_ptr(int node)
static inline const cpumask_t *cpumask_of_node(int node)
{
        return &node_to_cpumask_map[node];
}
@@ -116,12 +122,15 @@ static inline cpumask_t node_to_cpumask(int node)

#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */

/* Replace default node_to_cpumask_ptr with optimized version */
/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
                const cpumask_t *v = _node_to_cpumask_ptr(node)
                const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
                v = _node_to_cpumask_ptr(node)
                v = cpumask_of_node(node)

#endif /* CONFIG_X86_64 */

@@ -187,7 +196,7 @@ extern int __node_distance(int, int);
#define cpu_to_node(cpu) 0
#define early_cpu_to_node(cpu) 0

static inline const cpumask_t *_node_to_cpumask_ptr(int node)
static inline const cpumask_t *cpumask_of_node(int node)
{
        return &cpu_online_map;
}
@@ -200,12 +209,15 @@ static inline int node_to_first_cpu(int node)
        return first_cpu(cpu_online_map);
}

/* Replace default node_to_cpumask_ptr with optimized version */
/*
 * Replace default node_to_cpumask_ptr with optimized version
 * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
 */
#define node_to_cpumask_ptr(v, node) \
                const cpumask_t *v = _node_to_cpumask_ptr(node)
                const cpumask_t *v = cpumask_of_node(node)

#define node_to_cpumask_ptr_next(v, node) \
                v = _node_to_cpumask_ptr(node)
                v = cpumask_of_node(node)
#endif

#include <asm-generic/topology.h>
@@ -214,12 +226,12 @@ static inline int node_to_first_cpu(int node)
/* Returns the number of the first CPU on Node 'node'. */
static inline int node_to_first_cpu(int node)
{
        node_to_cpumask_ptr(mask, node);
        return first_cpu(*mask);
        return cpumask_first(cpumask_of_node(node));
}
#endif

extern cpumask_t cpu_coregroup_map(int cpu);
extern const struct cpumask *cpu_coregroup_mask(int cpu);

#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
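
Note on the topology.h hunks above: node_to_cpumask_ptr() now survives only as a deprecated wrapper around cpumask_of_node(), so callers are expected to switch to the pointer-returning helper, as node_to_first_cpu() does in this diff. A before/after sketch, not part of this commit (variable names illustrative):

        /* old, deprecated idiom */
        node_to_cpumask_ptr(mask, node);
        cpu = first_cpu(*mask);

        /* preferred replacement */
        const struct cpumask *mask = cpumask_of_node(node);
        cpu = cpumask_first(mask);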