path: root/arch/ia64/include/asm/acpi.h
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  Copyright (C) 1999 VA Linux Systems
 *  Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *  Copyright (C) 2000,2001 J.I. Lee <jung-ik.lee@intel.com>
 *  Copyright (C) 2001,2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 */

#ifndef _ASM_ACPI_H
#define _ASM_ACPI_H

#ifdef __KERNEL__

#include <acpi/pdc_intel.h>

#include <linux/init.h>
#include <linux/numa.h>
#include <asm/numa.h>

#ifdef	CONFIG_ACPI
extern int acpi_lapic;
#define acpi_disabled 0	/* ACPI always enabled on IA64 */
#define acpi_noirq 0	/* ACPI always enabled on IA64 */
#define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */
#define acpi_strict 1	/* no ACPI spec workarounds on IA64 */

static inline bool acpi_has_cpu_in_madt(void)
{
	return !!acpi_lapic;
}
#endif
#define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
static inline void disable_acpi(void) { }

#ifdef CONFIG_IA64_GENERIC
const char *acpi_get_sysname (void);
#else
static inline const char *acpi_get_sysname (void)
{
# if defined (CONFIG_IA64_HP_SIM)
	return "hpsim";
# elif defined (CONFIG_IA64_HP_ZX1)
	return "hpzx1";
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
	return "hpzx1_swiotlb";
# elif defined (CONFIG_IA64_SGI_SN2)
	return "sn2";
# elif defined (CONFIG_IA64_SGI_UV)
	return "uv";
# elif defined (CONFIG_IA64_DIG)
	return "dig";
# elif defined(CONFIG_IA64_DIG_VTD)
	return "dig_vtd";
# else
#	error Unknown platform.  Fix acpi.c.
# endif
}
#endif
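/*
 * Allocate an interrupt vector for the given ACPI interrupt type and
 * translate an ACPI GSI into a Linux IRQ number.
 */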
int acpi_request_vector (u32 int_type);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);

/* Low-level suspend routine. */
extern int acpi_suspend_lowlevel(void);

extern unsigned long acpi_wakeup_address;

/*
 * CPEI (Corrected Platform Error Interrupt) retargeting helpers: record
 * the CPEI override flag and the current target logical CPU. This is
 * useful for CPU removal.
 */
extern unsigned int can_cpei_retarget(void);
extern unsigned int is_cpu_cpei_target(unsigned int cpu);
extern void set_cpei_target_cpu(unsigned int cpu);
extern unsigned int get_cpei_target_cpu(void);
extern void prefill_possible_map(void);
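/* Extra CPU slots reserved in the possible map for later hot-plug. */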
#ifdef CONFIG_ACPI_HOTPLUG_CPU
extern int additional_cpus;
#else
#define additional_cpus 0
#endif

#ifdef CONFIG_ACPI_NUMA
#if MAX_NUMNODES > 256
#define MAX_PXM_DOMAINS MAX_NUMNODES
#else
#define MAX_PXM_DOMAINS (256)
#endif
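/* SRAT proximity domain (PXM) <-> NUMA node ID translation tables. */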
extern int pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif

static inline bool arch_has_acpi_pdc(void) { return true; }
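/* Advertise Enhanced SpeedStep (EST) capability in the _PDC buffer. */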
static inline void arch_acpi_set_pdc_bits(u32 *buf)
{
	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
}

#define acpi_unlazy_tlb(x)

#ifdef CONFIG_ACPI_NUMA
extern cpumask_t early_cpu_possible_map;
#define for_each_possible_early_cpu(cpu)  \
	for_each_cpu((cpu), &early_cpu_possible_map)

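/*
 * Pad early_cpu_possible_map out to at least min_cpus CPUs plus
 * reserve_cpus hot-add slots (capped at NR_CPUS), assigning any newly
 * added CPUs round-robin across the currently online nodes.
 */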
static inline void per_cpu_scan_finalize(int min_cpus, int reserve_cpus)
{
	int low_cpu, high_cpu;
	int cpu;
	int next_nid = 0;

	low_cpu = cpumask_weight(&early_cpu_possible_map);

	high_cpu = max(low_cpu, min_cpus);
	high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);

	for (cpu = low_cpu; cpu < high_cpu; cpu++) {
		cpumask_set_cpu(cpu, &early_cpu_possible_map);
		if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
			node_cpuid[cpu].nid = next_nid;
			next_nid++;
			if (next_nid >= num_online_nodes())
				next_nid = 0;
		}
	}
}

extern void acpi_numa_fixup(void);

#endif /* CONFIG_ACPI_NUMA */

#endif /*__KERNEL__*/

#endif /*_ASM_ACPI_H*/