diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile index 3242e591fa82..26b78d86f25a 100644 --- a/arch/x86/kernel/acpi/Makefile +++ b/arch/x86/kernel/acpi/Makefile @@ -1,6 +1,7 @@ obj-$(CONFIG_ACPI) += boot.o obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o obj-$(CONFIG_ACPI_APEI) += apei.o +obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_msr.o ifneq ($(CONFIG_ACPI_PROCESSOR),) obj-y += cstate.o diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 90d84c3eee53..ccd27fe9ca81 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1031,8 +1031,8 @@ static int __init acpi_parse_madt_lapic_entries(void) return ret; } - x2count = madt_proc[0].count; - count = madt_proc[1].count; + count = madt_proc[0].count; + x2count = madt_proc[1].count; } if (!count && !x2count) { printk(KERN_ERR PREFIX "No LAPIC entries present\n"); @@ -1513,7 +1513,7 @@ void __init acpi_boot_table_init(void) * If acpi_disabled, bail out */ if (acpi_disabled) - return; + return; /* * Initialize the ACPI boot-time table parser. diff --git a/arch/x86/kernel/acpi/cppc_msr.c b/arch/x86/kernel/acpi/cppc_msr.c new file mode 100644 index 000000000000..6fb478bf82fd --- /dev/null +++ b/arch/x86/kernel/acpi/cppc_msr.c @@ -0,0 +1,58 @@ +/* + * cppc_msr.c: MSR Interface for CPPC + * Copyright (c) 2016, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + */ + +#include <acpi/cppc_acpi.h> +#include <asm/msr.h> + +/* Refer to drivers/acpi/cppc_acpi.c for the description of functions */ + +bool cpc_ffh_supported(void) +{ + return true; +} + +int cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) +{ + int err; + + err = rdmsrl_safe_on_cpu(cpunum, reg->address, val); + if (!err) { + u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + + *val &= mask; + *val >>= reg->bit_offset; + } + return err; +} + +int cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + u64 rd_val; + int err; + + err = rdmsrl_safe_on_cpu(cpunum, reg->address, &rd_val); + if (!err) { + u64 mask = GENMASK_ULL(reg->bit_offset + reg->bit_width - 1, + reg->bit_offset); + + val <<= reg->bit_offset; + val &= mask; + rd_val &= ~mask; + rd_val |= val; + err = wrmsrl_safe_on_cpu(cpunum, reg->address, rd_val); + } + return err; +} diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index 445ce28475b3..c6bb6aa5ac4b 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -227,7 +227,6 @@ config ACPI_MCFG config ACPI_CPPC_LIB bool depends on ACPI_PROCESSOR - depends on !ACPI_CPU_FREQ_PSS select MAILBOX select PCC help diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 5f112d811e42..d58fbf7f04e6 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -72,7 +72,7 @@ static int acpi_apd_setup(struct apd_private_data *pdata) } #ifdef CONFIG_X86_AMD_PLATFORM_DEVICE -static struct apd_device_desc cz_i2c_desc = { +static const struct apd_device_desc cz_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 133000000, }; @@ -84,7 +84,7 @@ static struct property_entry uart_properties[] = { { }, }; -static struct apd_device_desc cz_uart_desc = { +static const struct apd_device_desc cz_uart_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 48000000, .properties = uart_properties, @@ -92,10 +92,15 @@ static struct apd_device_desc cz_uart_desc = { #endif #ifdef CONFIG_ARM64 -static struct apd_device_desc xgene_i2c_desc = { +static const struct apd_device_desc xgene_i2c_desc = { .setup = acpi_apd_setup, .fixed_clk_rate = 100000000, }; + +static const struct apd_device_desc vulcan_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 133000000, +}; #endif #else @@ -164,6 +169,7 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { #endif #ifdef CONFIG_ARM64 { "APMC0D0F", APD_ADDR(xgene_i2c_desc) }, + { "BRCM900D", APD_ADDR(vulcan_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 2e981732805b..d0d0504b7c89 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -40,15 +40,48 @@ #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/ktime.h> +#include <linux/rwsem.h> +#include <linux/wait.h> #include <acpi/cppc_acpi.h> -/* - * Lock to provide mutually exclusive access to the PCC - * channel. e.g. When the remote updates the shared region - * with new data, the reader needs to be protected from - * other CPUs activity on the same channel. - */ -static DEFINE_SPINLOCK(pcc_lock); + +struct cppc_pcc_data { + struct mbox_chan *pcc_channel; + void __iomem *pcc_comm_addr; + int pcc_subspace_idx; + bool pcc_channel_acquired; + ktime_t deadline; + unsigned int pcc_mpar, pcc_mrtt, pcc_nominal; + + bool pending_pcc_write_cmd; /* Any pending/batched PCC write cmds? */ + bool platform_owns_pcc; /* Ownership of PCC subspace */ + unsigned int pcc_write_cnt; /* Running count of PCC write commands */ + + /* + * Lock to provide controlled access to the PCC channel.
+ * + * For performance critical use cases (currently cppc_set_perf), + * we need to take read_lock and check if channel belongs to OSPM + * before reading or writing to PCC subspace. + * We need to take write_lock before transferring the channel + * ownership to the platform via a Doorbell. + * This allows us to batch a number of CPPC requests if they happen + * to originate at about the same time. + * + * For non-performance critical use cases (init), + * take write_lock for all purposes, which gives exclusive access. + */ + struct rw_semaphore pcc_lock; + + /* Wait queue for CPUs whose requests were batched */ + wait_queue_head_t pcc_write_wait_q; +}; + +/* Structure to represent the single PCC channel */ +static struct cppc_pcc_data pcc_data = { + .pcc_subspace_idx = -1, + .platform_owns_pcc = true, +}; /* * The cpc_desc structure contains the ACPI register details @@ -59,18 +92,25 @@ static DEFINE_SPINLOCK(pcc_lock); */ static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr); -/* This layer handles all the PCC specifics for CPPC. */ -static struct mbox_chan *pcc_channel; -static void __iomem *pcc_comm_addr; -static u64 comm_base_addr; -static int pcc_subspace_idx = -1; -static bool pcc_channel_acquired; -static ktime_t deadline; -static unsigned int pcc_mpar, pcc_mrtt; - /* pcc mapped address + header size + offset within PCC subspace */ -#define GET_PCC_VADDR(offs) (pcc_comm_addr + 0x8 + (offs)) +#define GET_PCC_VADDR(offs) (pcc_data.pcc_comm_addr + 0x8 + (offs)) +/* Check if a CPC register is in PCC */ +#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER && \ + (cpc)->cpc_entry.reg.space_id == \ + ACPI_ADR_SPACE_PLATFORM_COMM) + +/* Evaluates to true if reg is a NULL register descriptor */ +#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \ + (reg)->address == 0 && \ + (reg)->bit_width == 0 && \ + (reg)->bit_offset == 0 && \ + (reg)->access_width == 0) + +/* Evaluates to true if an optional cpc field is supported */ +#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ? \ + !!(cpc)->cpc_entry.int_value : \ + !IS_NULL_REG(&(cpc)->cpc_entry.reg)) /* * Arbitrary Retries in case the remote processor is slow to respond * to PCC commands.
Keeping it high enough to cover emulators where @@ -78,11 +118,79 @@ static unsigned int pcc_mpar, pcc_mrtt; */ #define NUM_RETRIES 500 -static int check_pcc_chan(void) +struct cppc_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_cppc_ro(_name) \ +static struct cppc_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj) + +static ssize_t show_feedback_ctrs(struct kobject *kobj, + struct attribute *attr, char *buf) { - int ret = -EIO; - struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_comm_addr; - ktime_t next_deadline = ktime_add(ktime_get(), deadline); + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n", + fb_ctrs.reference, fb_ctrs.delivered); +} +define_one_cppc_ro(feedback_ctrs); + +static ssize_t show_reference_perf(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", + fb_ctrs.reference_perf); +} +define_one_cppc_ro(reference_perf); + +static ssize_t show_wraparound_time(struct kobject *kobj, + struct attribute *attr, char *buf) +{ + struct cpc_desc *cpc_ptr = to_cpc_desc(kobj); + struct cppc_perf_fb_ctrs fb_ctrs = {0}; + + cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs); + + return scnprintf(buf, PAGE_SIZE, "%llu\n", fb_ctrs.ctr_wrap_time); + +} +define_one_cppc_ro(wraparound_time); + +static struct attribute *cppc_attrs[] = { + &feedback_ctrs.attr, + &reference_perf.attr, + &wraparound_time.attr, + NULL +}; + +static struct kobj_type cppc_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = cppc_attrs, +}; + +static int check_pcc_chan(bool chk_err_bit) +{ + int ret = -EIO, status = 0; + struct acpi_pcct_shared_memory __iomem *generic_comm_base = pcc_data.pcc_comm_addr; + ktime_t next_deadline = ktime_add(ktime_get(), pcc_data.deadline); + + if (!pcc_data.platform_owns_pcc) + return 0; /* Retry in case the remote processor was too slow to catch up. */ while (!ktime_after(ktime_get(), next_deadline)) { @@ -91,8 +199,11 @@ static int check_pcc_chan(void) * platform and should have set the command completion bit when * PCC can be used by OSPM */ - if (readw_relaxed(&generic_comm_base->status) & PCC_CMD_COMPLETE) { + status = readw_relaxed(&generic_comm_base->status); + if (status & PCC_CMD_COMPLETE_MASK) { ret = 0; + if (chk_err_bit && (status & PCC_ERROR_MASK)) + ret = -EIO; break; } /* @@ -102,14 +213,23 @@ static int check_pcc_chan(void) udelay(3); } + if (likely(!ret)) + pcc_data.platform_owns_pcc = false; + else + pr_err("PCC check channel failed. 
Status=%x\n", status); + return ret; } +/* + * This function transfers the ownership of the PCC to the platform, + * so it must be called while holding write_lock(pcc_lock) + */ static int send_pcc_cmd(u16 cmd) { - int ret = -EIO; + int ret = -EIO, i; struct acpi_pcct_shared_memory *generic_comm_base = - (struct acpi_pcct_shared_memory *) pcc_comm_addr; + (struct acpi_pcct_shared_memory *) pcc_data.pcc_comm_addr; static ktime_t last_cmd_cmpl_time, last_mpar_reset; static int mpar_count; unsigned int time_delta; @@ -119,20 +239,29 @@ static int send_pcc_cmd(u16 cmd) * the channel before writing to PCC space */ if (cmd == CMD_READ) { - ret = check_pcc_chan(); + /* + * If there are pending cpc_writes, then we stole the channel + * before write completion, so first send a WRITE command to + * platform + */ + if (pcc_data.pending_pcc_write_cmd) + send_pcc_cmd(CMD_WRITE); + + ret = check_pcc_chan(false); if (ret) - return ret; - } + goto end; + } else /* CMD_WRITE */ + pcc_data.pending_pcc_write_cmd = FALSE; /* * Handle the Minimum Request Turnaround Time(MRTT) * "The minimum amount of time that OSPM must wait after the completion * of a command before issuing the next command, in microseconds" */ - if (pcc_mrtt) { + if (pcc_data.pcc_mrtt) { time_delta = ktime_us_delta(ktime_get(), last_cmd_cmpl_time); - if (pcc_mrtt > time_delta) - udelay(pcc_mrtt - time_delta); + if (pcc_data.pcc_mrtt > time_delta) + udelay(pcc_data.pcc_mrtt - time_delta); } /* @@ -146,15 +275,16 @@ static int send_pcc_cmd(u16 cmd) * not send the request to the platform after hitting the MPAR limit in * any 60s window */ - if (pcc_mpar) { + if (pcc_data.pcc_mpar) { if (mpar_count == 0) { time_delta = ktime_ms_delta(ktime_get(), last_mpar_reset); if (time_delta < 60 * MSEC_PER_SEC) { pr_debug("PCC cmd not sent due to MPAR limit"); - return -EIO; + ret = -EIO; + goto end; } last_mpar_reset = ktime_get(); - mpar_count = pcc_mpar; + mpar_count = pcc_data.pcc_mpar; } mpar_count--; } @@ -165,33 +295,43 @@ static int send_pcc_cmd(u16 cmd) /* Flip CMD COMPLETE bit */ writew_relaxed(0, &generic_comm_base->status); + pcc_data.platform_owns_pcc = true; + /* Ring doorbell */ - ret = mbox_send_message(pcc_channel, &cmd); + ret = mbox_send_message(pcc_data.pcc_channel, &cmd); if (ret < 0) { pr_err("Err sending PCC mbox message. cmd:%d, ret:%d\n", cmd, ret); - return ret; + goto end; } - /* - * For READs we need to ensure the cmd completed to ensure - * the ensuing read()s can proceed. For WRITEs we dont care - * because the actual write()s are done before coming here - * and the next READ or WRITE will check if the channel - * is busy/free at the entry of this call.
- * - * If Minimum Request Turnaround Time is non-zero, we need - * to record the completion time of both READ and WRITE - * command for proper handling of MRTT, so we need to check - * for pcc_mrtt in addition to CMD_READ - */ - if (cmd == CMD_READ || pcc_mrtt) { - ret = check_pcc_chan(); - if (pcc_mrtt) - last_cmd_cmpl_time = ktime_get(); + /* Wait for completion and check for PCC error bit */ + ret = check_pcc_chan(true); + + if (pcc_data.pcc_mrtt) + last_cmd_cmpl_time = ktime_get(); + + if (pcc_data.pcc_channel->mbox->txdone_irq) + mbox_chan_txdone(pcc_data.pcc_channel, ret); + else + mbox_client_txdone(pcc_data.pcc_channel, ret); + +end: + if (cmd == CMD_WRITE) { + if (unlikely(ret)) { + for_each_possible_cpu(i) { + struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i); + if (!desc) + continue; + + if (desc->write_cmd_id == pcc_data.pcc_write_cnt) + desc->write_cmd_status = ret; + } + } + pcc_data.pcc_write_cnt++; + wake_up_all(&pcc_data.pcc_write_wait_q); } - mbox_client_txdone(pcc_channel, ret); return ret; } @@ -272,13 +412,13 @@ end: * * Return: 0 for success or negative value for err. */ -int acpi_get_psd_map(struct cpudata **all_cpu_data) +int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) { int count_target; int retval = 0; unsigned int i, j; cpumask_var_t covered_cpus; - struct cpudata *pr, *match_pr; + struct cppc_cpudata *pr, *match_pr; struct acpi_psd_package *pdomain; struct acpi_psd_package *match_pdomain; struct cpc_desc *cpc_ptr, *match_cpc_ptr; @@ -394,14 +534,13 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map); static int register_pcc_channel(int pcc_subspace_idx) { struct acpi_pcct_hw_reduced *cppc_ss; - unsigned int len; u64 usecs_lat; if (pcc_subspace_idx >= 0) { - pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, + pcc_data.pcc_channel = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_subspace_idx); - if (IS_ERR(pcc_channel)) { + if (IS_ERR(pcc_data.pcc_channel)) { pr_err("Failed to find PCC communication channel\n"); return -ENODEV; } @@ -412,43 +551,50 @@ static int register_pcc_channel(int pcc_subspace_idx) * PCC channels) and stored pointers to the * subspace communication region in con_priv. */ - cppc_ss = pcc_channel->con_priv; + cppc_ss = (pcc_data.pcc_channel)->con_priv; if (!cppc_ss) { pr_err("No PCC subspace found for CPPC\n"); return -ENODEV; } - /* - * This is the shared communication region - * for the OS and Platform to communicate over. - */ - comm_base_addr = cppc_ss->base_address; - len = cppc_ss->length; - /* * cppc_ss->latency is just a Nominal value. In reality * the remote processor could be much slower to reply. * So add an arbitrary amount of wait on top of Nominal. */ usecs_lat = NUM_RETRIES * cppc_ss->latency; - deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); - pcc_mrtt = cppc_ss->min_turnaround_time; - pcc_mpar = cppc_ss->max_access_rate; + pcc_data.deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC); + pcc_data.pcc_mrtt = cppc_ss->min_turnaround_time; + pcc_data.pcc_mpar = cppc_ss->max_access_rate; + pcc_data.pcc_nominal = cppc_ss->latency; - pcc_comm_addr = acpi_os_ioremap(comm_base_addr, len); - if (!pcc_comm_addr) { + pcc_data.pcc_comm_addr = acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length); + if (!pcc_data.pcc_comm_addr) { pr_err("Failed to ioremap PCC comm region mem\n"); return -ENOMEM; } /* Set flag so that we dont come here for each CPU.
*/ - pcc_channel_acquired = true; + pcc_data.pcc_channel_acquired = true; } return 0; } +/** + * cpc_ffh_supported() - check if FFH reading supported + * + * Check if the architecture has support for functional fixed hardware + * read/write capability. + * + * Return: true for supported, false for not supported + */ +bool __weak cpc_ffh_supported(void) +{ + return false; +} + /* * An example CPC table looks like the following. * @@ -507,6 +653,7 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) union acpi_object *out_obj, *cpc_obj; struct cpc_desc *cpc_ptr; struct cpc_reg *gas_t; + struct device *cpu_dev; acpi_handle handle = pr->handle; unsigned int num_ent, i, cpc_rev; acpi_status status; @@ -545,6 +692,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; } + cpc_ptr->num_entries = num_ent; + /* Second entry should be revision. */ cpc_obj = &out_obj->package.elements[1]; if (cpc_obj->type == ACPI_TYPE_INTEGER) { @@ -579,16 +728,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) * so extract it only once. */ if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - if (pcc_subspace_idx < 0) - pcc_subspace_idx = gas_t->access_width; - else if (pcc_subspace_idx != gas_t->access_width) { + if (pcc_data.pcc_subspace_idx < 0) + pcc_data.pcc_subspace_idx = gas_t->access_width; + else if (pcc_data.pcc_subspace_idx != gas_t->access_width) { pr_debug("Mismatched PCC ids.\n"); goto out_free; } - } else if (gas_t->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) { - /* Support only PCC and SYS MEM type regs */ - pr_debug("Unsupported register type: %d\n", gas_t->space_id); - goto out_free; + } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + if (gas_t->address) { + void __iomem *addr; + + addr = ioremap(gas_t->address, gas_t->bit_width/8); + if (!addr) + goto out_free; + cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr; + } + } else { + if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { + /* Support only PCC, SYS MEM and FFH type regs */ + pr_debug("Unsupported register type: %d\n", gas_t->space_id); + goto out_free; + } } cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER; @@ -607,10 +767,13 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) goto out_free; /* Register PCC channel once for all CPUs. */ - if (!pcc_channel_acquired) { - ret = register_pcc_channel(pcc_subspace_idx); + if (!pcc_data.pcc_channel_acquired) { + ret = register_pcc_channel(pcc_data.pcc_subspace_idx); if (ret) goto out_free; + + init_rwsem(&pcc_data.pcc_lock); + init_waitqueue_head(&pcc_data.pcc_write_wait_q); } /* Plug PSD data into this CPUs CPC descriptor. */ @@ -619,10 +782,27 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) /* Everything looks okay */ pr_debug("Parsed CPC struct for CPU: %d\n", pr->id); + /* Add per logical CPU nodes for reading its feedback counters.
*/ + cpu_dev = get_cpu_device(pr->id); + if (!cpu_dev) + goto out_free; + + ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj, + "acpi_cppc"); + if (ret) + goto out_free; + kfree(output.pointer); return 0; out_free: + /* Free all the mapped sys mem areas for this CPU */ + for (i = 2; i < cpc_ptr->num_entries; i++) { + void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; + + if (addr) + iounmap(addr); + } kfree(cpc_ptr); out_buf_free: @@ -640,26 +820,82 @@ EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe); void acpi_cppc_processor_exit(struct acpi_processor *pr) { struct cpc_desc *cpc_ptr; + unsigned int i; + void __iomem *addr; + cpc_ptr = per_cpu(cpc_desc_ptr, pr->id); + + /* Free all the mapped sys mem areas for this CPU */ + for (i = 2; i < cpc_ptr->num_entries; i++) { + addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr; + if (addr) + iounmap(addr); + } + + kobject_put(&cpc_ptr->kobj); kfree(cpc_ptr); } EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit); +/** + * cpc_read_ffh() - Read FFH register + * @cpunum: cpu number to read + * @reg: cppc register information + * @val: placeholder for return value + * + * Read bit_width bits from a specified address and bit_offset + * + * Return: 0 for success or an error code + */ +int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val) +{ + return -ENOTSUPP; +} + +/** + * cpc_write_ffh() - Write FFH register + * @cpunum: cpu number to write + * @reg: cppc register information + * @val: value to write + * + * Write value of bit_width bits to a specified address and bit_offset + * + * Return: 0 for success or an error code + */ +int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val) +{ + return -ENOTSUPP; +} + /* * Since cpc_read and cpc_write are called while holding pcc_lock, it should be * as fast as possible. We have already mapped the PCC subspace during init, so * we can directly write to it.
*/ -static int cpc_read(struct cpc_reg *reg, u64 *val) +static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val) { int ret_val = 0; + void __iomem *vaddr = 0; + struct cpc_reg *reg = ®_res->cpc_entry.reg; + + if (reg_res->type == ACPI_TYPE_INTEGER) { + *val = reg_res->cpc_entry.int_value; + return ret_val; + } *val = 0; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - void __iomem *vaddr = GET_PCC_VADDR(reg->address); + if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) + vaddr = GET_PCC_VADDR(reg->address); + else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + vaddr = reg_res->sys_mem_vaddr; + else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) + return cpc_read_ffh(cpu, reg, val); + else + return acpi_os_read_memory((acpi_physical_address)reg->address, + val, reg->bit_width); - switch (reg->bit_width) { + switch (reg->bit_width) { case 8: *val = readb_relaxed(vaddr); break; @@ -674,23 +910,30 @@ static int cpc_read(struct cpc_reg *reg, u64 *val) break; default: pr_debug("Error: Cannot read %u bit width from PCC\n", - reg->bit_width); + reg->bit_width); ret_val = -EFAULT; - } - } else - ret_val = acpi_os_read_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + } + return ret_val; } -static int cpc_write(struct cpc_reg *reg, u64 val) +static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val) { int ret_val = 0; + void __iomem *vaddr = 0; + struct cpc_reg *reg = ®_res->cpc_entry.reg; - if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - void __iomem *vaddr = GET_PCC_VADDR(reg->address); + if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) + vaddr = GET_PCC_VADDR(reg->address); + else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) + vaddr = reg_res->sys_mem_vaddr; + else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) + return cpc_write_ffh(cpu, reg, val); + else + return acpi_os_write_memory((acpi_physical_address)reg->address, + val, reg->bit_width); - switch (reg->bit_width) { + switch (reg->bit_width) { case 8: writeb_relaxed(val, vaddr); break; @@ -705,13 +948,11 @@ static int cpc_write(struct cpc_reg *reg, u64 val) break; default: pr_debug("Error: Cannot write %u bit width to PCC\n", - reg->bit_width); + reg->bit_width); ret_val = -EFAULT; break; - } - } else - ret_val = acpi_os_write_memory((acpi_physical_address)reg->address, - val, reg->bit_width); + } + return ret_val; } @@ -727,8 +968,8 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); struct cpc_register_resource *highest_reg, *lowest_reg, *ref_perf, *nom_perf; - u64 high, low, ref, nom; - int ret = 0; + u64 high, low, nom; + int ret = 0, regs_in_pcc = 0; if (!cpc_desc) { pr_debug("No CPC descriptor for CPU:%d\n", cpunum); @@ -740,13 +981,11 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps) ref_perf = &cpc_desc->cpc_regs[REFERENCE_PERF]; nom_perf = &cpc_desc->cpc_regs[NOMINAL_PERF]; - spin_lock(&pcc_lock); - /* Are any of the regs PCC ?*/ - if ((highest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (lowest_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (ref_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (nom_perf->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { + if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) || + CPC_IN_PCC(ref_perf) || CPC_IN_PCC(nom_perf)) { + regs_in_pcc = 1; + down_write(&pcc_data.pcc_lock); /* Ring doorbell once to update PCC subspace */ if 
(send_pcc_cmd(CMD_READ) < 0) { ret = -EIO; @@ -754,26 +993,21 @@ } } - cpc_read(&highest_reg->cpc_entry.reg, &high); + cpc_read(cpunum, highest_reg, &high); perf_caps->highest_perf = high; - cpc_read(&lowest_reg->cpc_entry.reg, &low); + cpc_read(cpunum, lowest_reg, &low); perf_caps->lowest_perf = low; - cpc_read(&ref_perf->cpc_entry.reg, &ref); - perf_caps->reference_perf = ref; - - cpc_read(&nom_perf->cpc_entry.reg, &nom); + cpc_read(cpunum, nom_perf, &nom); perf_caps->nominal_perf = nom; - if (!ref) - perf_caps->reference_perf = perf_caps->nominal_perf; - if (!high || !low || !nom) ret = -EFAULT; out_err: - spin_unlock(&pcc_lock); + if (regs_in_pcc) + up_write(&pcc_data.pcc_lock); return ret; } EXPORT_SYMBOL_GPL(cppc_get_perf_caps); @@ -788,9 +1022,10 @@ EXPORT_SYMBOL_GPL(cppc_get_perf_caps); int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) { struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum); - struct cpc_register_resource *delivered_reg, *reference_reg; - u64 delivered, reference; - int ret = 0; + struct cpc_register_resource *delivered_reg, *reference_reg, + *ref_perf_reg, *ctr_wrap_reg; + u64 delivered, reference, ref_perf, ctr_wrap_time; + int ret = 0, regs_in_pcc = 0; if (!cpc_desc) { pr_debug("No CPC descriptor for CPU:%d\n", cpunum); @@ -799,12 +1034,21 @@ int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs) delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR]; reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR]; + ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF]; + ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME]; - spin_lock(&pcc_lock); + /* + * If the reference perf register is not supported then we should + * use the nominal perf value + */ + if (!CPC_SUPPORTED(ref_perf_reg)) + ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF]; /* Are any of the regs PCC ?*/ - if ((delivered_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) || - (reference_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM)) { + if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) || + CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) { + down_write(&pcc_data.pcc_lock); + regs_in_pcc = 1; /* Ring doorbell once to update PCC subspace */ if (send_pcc_cmd(CMD_READ) < 0) { ret = -EIO; @@ -812,25 +1056,31 @@ } - cpc_read(&delivered_reg->cpc_entry.reg, &delivered); - cpc_read(&reference_reg->cpc_entry.reg, &reference); + cpc_read(cpunum, delivered_reg, &delivered); + cpc_read(cpunum, reference_reg, &reference); + cpc_read(cpunum, ref_perf_reg, &ref_perf); - if (!delivered || !reference) { + /* + * Per spec, if ctr_wrap_time optional register is unsupported, then the + * performance counters are assumed to never wrap during the lifetime of + * the platform + */ + ctr_wrap_time = (u64)(~((u64)0)); + if (CPC_SUPPORTED(ctr_wrap_reg)) + cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time); + + if (!delivered || !reference || !ref_perf) { ret = -EFAULT; goto out_err; } perf_fb_ctrs->delivered = delivered; perf_fb_ctrs->reference = reference; - - perf_fb_ctrs->delivered -= perf_fb_ctrs->prev_delivered; - perf_fb_ctrs->reference -= perf_fb_ctrs->prev_reference; - - perf_fb_ctrs->prev_delivered = delivered; - perf_fb_ctrs->prev_reference = reference; - + perf_fb_ctrs->reference_perf = ref_perf; + perf_fb_ctrs->ctr_wrap_time = ctr_wrap_time; out_err: - spin_unlock(&pcc_lock); + if (regs_in_pcc) +
up_write(&pcc_data.pcc_lock); return ret; } EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs); @@ -855,30 +1105,142 @@ int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls) desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; - spin_lock(&pcc_lock); - - /* If this is PCC reg, check if channel is free before writing */ - if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - ret = check_pcc_chan(); - if (ret) - goto busy_channel; + /* + * This is Phase-I where we want to write to CPC registers + * -> We want all CPUs to be able to execute this phase in parallel + * + * Since read_lock can be acquired by multiple CPUs simultaneously, we + * achieve that goal here + */ + if (CPC_IN_PCC(desired_reg)) { + down_read(&pcc_data.pcc_lock); /* BEGIN Phase-I */ + if (pcc_data.platform_owns_pcc) { + ret = check_pcc_chan(false); + if (ret) { + up_read(&pcc_data.pcc_lock); + return ret; + } + } + /* + * Update the pending_write to make sure a PCC CMD_READ will not + * arrive and steal the channel during the switch to write lock + */ + pcc_data.pending_pcc_write_cmd = true; + cpc_desc->write_cmd_id = pcc_data.pcc_write_cnt; + cpc_desc->write_cmd_status = 0; } /* * Skip writing MIN/MAX until Linux knows how to come up with * useful values. */ - cpc_write(&desired_reg->cpc_entry.reg, perf_ctrls->desired_perf); + cpc_write(cpu, desired_reg, perf_ctrls->desired_perf); - /* Is this a PCC reg ?*/ - if (desired_reg->cpc_entry.reg.space_id == ACPI_ADR_SPACE_PLATFORM_COMM) { - /* Ring doorbell so Remote can get our perf request. */ - if (send_pcc_cmd(CMD_WRITE) < 0) - ret = -EIO; + if (CPC_IN_PCC(desired_reg)) + up_read(&pcc_data.pcc_lock); /* END Phase-I */ + /* + * This is Phase-II where we transfer the ownership of PCC to the platform + * + * Short summary: if we think of a group of cppc_set_perf requests that + * happened in a short overlapping interval, the last CPU to come out of + * Phase-I will enter Phase-II and ring the doorbell. + * + * We have the following requirements for Phase-II: + * 1. We want to execute Phase-II only when there are no CPUs + * currently executing in Phase-I + * 2. Once we start Phase-II we want to prevent all other CPUs from + * entering Phase-I. + * 3. We want only one CPU among all those who went through Phase-I + * to run Phase-II + * + * If write_trylock fails to get the lock and doesn't transfer the + * PCC ownership to the platform, then one of the following will be TRUE + * 1. There is at least one CPU in Phase-I which will later execute + * write_trylock, so the CPUs in Phase-I will be responsible for + * executing Phase-II. + * 2. Some other CPU has beaten this CPU to successfully execute the + * write_trylock and has already acquired the write_lock. We know for a + * fact it (the other CPU acquiring the write_lock) couldn't have happened + * before this CPU's Phase-I as we held the read_lock. + * 3. Some other CPU executing pcc CMD_READ has stolen the + * down_write, in which case, send_pcc_cmd will check for pending + * CMD_WRITE commands by checking the pending_pcc_write_cmd. + * So this CPU can be certain that its request will be delivered. + * So in all cases, this CPU knows that its request will be delivered + * by another CPU and can return. + * + * After getting the down_write we still need to check for + * pending_pcc_write_cmd to take care of the following scenario. + * The thread running this code could be scheduled out between + * Phase-I and Phase-II.
Before it is scheduled back on, another CPU + * could have delivered the request to the platform by triggering the + * doorbell and transferred the ownership of PCC to the platform. So this + * avoids triggering an unnecessary doorbell and, more importantly, before + * triggering the doorbell it makes sure that the PCC channel ownership + * is still with OSPM. + * pending_pcc_write_cmd can also be cleared by a different CPU, if + * there was a pcc CMD_READ waiting on down_write and it steals the lock + * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this + * case during a CMD_READ and if there are pending writes it delivers + * the write command before servicing the read command. + */ + if (CPC_IN_PCC(desired_reg)) { + if (down_write_trylock(&pcc_data.pcc_lock)) { /* BEGIN Phase-II */ + /* Update only if there are pending write commands */ + if (pcc_data.pending_pcc_write_cmd) + send_pcc_cmd(CMD_WRITE); + up_write(&pcc_data.pcc_lock); /* END Phase-II */ + } else + /* Wait until pcc_write_cnt is updated by send_pcc_cmd */ + wait_event(pcc_data.pcc_write_wait_q, + cpc_desc->write_cmd_id != pcc_data.pcc_write_cnt); + + /* send_pcc_cmd updates the status in case of failure */ + ret = cpc_desc->write_cmd_status; } -busy_channel: - spin_unlock(&pcc_lock); - return ret; } EXPORT_SYMBOL_GPL(cppc_set_perf); + +/** + * cppc_get_transition_latency - returns frequency transition latency in ns + * + * ACPI CPPC does not explicitly specify how a platform can specify the + * transition latency for performance change requests. The closest we have + * is the timing information from the PCCT tables, which provides the info + * on the number and frequency of PCC commands the platform can handle. + */ +unsigned int cppc_get_transition_latency(int cpu_num) +{ + /* + * Expected transition latency is based on the PCCT timing values. + * Below are definitions from the ACPI spec: + * pcc_nominal - Expected latency to process a command, in microseconds + * pcc_mpar - The maximum number of periodic requests that the subspace + * channel can support, reported in commands per minute. 0 + * indicates no limitation. + * pcc_mrtt - The minimum amount of time that OSPM must wait after the + * completion of a command before issuing the next command, + * in microseconds.
+ */ + unsigned int latency_ns = 0; + struct cpc_desc *cpc_desc; + struct cpc_register_resource *desired_reg; + + cpc_desc = per_cpu(cpc_desc_ptr, cpu_num); + if (!cpc_desc) + return CPUFREQ_ETERNAL; + + desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF]; + if (!CPC_IN_PCC(desired_reg)) + return CPUFREQ_ETERNAL; + + if (pcc_data.pcc_mpar) + latency_ns = 60 * (1000 * 1000 * 1000 / pcc_data.pcc_mpar); + + latency_ns = max(latency_ns, pcc_data.pcc_nominal * 1000); + latency_ns = max(latency_ns, pcc_data.pcc_mrtt * 1000); + + return latency_ns; +} +EXPORT_SYMBOL_GPL(cppc_get_transition_latency); diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index 0553aeebb228..8f8552a19e63 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -245,8 +245,8 @@ static int __acpi_processor_start(struct acpi_device *device) return 0; result = acpi_cppc_processor_probe(pr); - if (result) - return -ENODEV; + if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS)) + dev_warn(&device->dev, "CPPC data invalid or not present\n"); if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) acpi_processor_power_init(pr); diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index 8882b8e2ecd0..6588ec567d93 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -30,13 +30,13 @@ * performance capabilities, desired performance level * requested etc. */ -static struct cpudata **all_cpu_data; +static struct cppc_cpudata **all_cpu_data; static int cppc_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; struct cpufreq_freqs freqs; int ret = 0; @@ -66,7 +66,7 @@ static int cppc_verify_policy(struct cpufreq_policy *policy) static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) { int cpu_num = policy->cpu; - struct cpudata *cpu = all_cpu_data[cpu_num]; + struct cppc_cpudata *cpu = all_cpu_data[cpu_num]; int ret; cpu->perf_ctrls.desired_perf = cpu->perf_caps.lowest_perf; @@ -79,7 +79,7 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy) static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; unsigned int cpu_num = policy->cpu; int ret = 0; @@ -98,6 +98,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->max = cpu->perf_caps.highest_perf; policy->cpuinfo.min_freq = policy->min; policy->cpuinfo.max_freq = policy->max; + policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num); policy->shared_type = cpu->shared_type; if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) @@ -134,7 +135,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = { static int __init cppc_cpufreq_init(void) { int i, ret = 0; - struct cpudata *cpu; + struct cppc_cpudata *cpu; if (acpi_disabled) return -ENODEV; @@ -144,7 +145,7 @@ static int __init cppc_cpufreq_init(void) return -ENOMEM; for_each_possible_cpu(i) { - all_cpu_data[i] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); + all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL); if (!all_cpu_data[i]) goto out; @@ -175,7 +176,7 @@ out: static void __exit cppc_cpufreq_exit(void) { - struct cpudata *cpu; + struct cppc_cpudata *cpu; int i; cpufreq_unregister_driver(&cppc_cpufreq_driver); diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 043828d541f7..08c87fadca8c 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c @@ -59,6 +59,7 
@@ #include <linux/delay.h> #include <linux/io.h> #include <linux/init.h> +#include <linux/interrupt.h> #include <linux/list.h> #include <linux/platform_device.h> #include <linux/mailbox_controller.h> @@ -68,11 +69,16 @@ #include "mailbox.h" #define MAX_PCC_SUBSPACES 256 +#define MBOX_IRQ_NAME "pcc-mbox" static struct mbox_chan *pcc_mbox_channels; /* Array of cached virtual address for doorbell registers */ static void __iomem **pcc_doorbell_vaddr; +/* Array of cached virtual address for doorbell ack registers */ +static void __iomem **pcc_doorbell_ack_vaddr; +/* Array of doorbell interrupts */ +static int *pcc_doorbell_irq; static struct mbox_controller pcc_mbox_ctrl = {}; /** @@ -91,79 +97,6 @@ static struct mbox_chan *get_pcc_channel(int id) return &pcc_mbox_channels[id]; } -/** - * pcc_mbox_request_channel - PCC clients call this function to - * request a pointer to their PCC subspace, from which they - * can get the details of communicating with the remote. - * @cl: Pointer to Mailbox client, so we know where to bind the - * Channel. - * @subspace_id: The PCC Subspace index as parsed in the PCC client - * ACPI package. This is used to lookup the array of PCC - * subspaces as parsed by the PCC Mailbox controller. - * - * Return: Pointer to the Mailbox Channel if successful or - * ERR_PTR. - */ -struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, - int subspace_id) -{ - struct device *dev = pcc_mbox_ctrl.dev; - struct mbox_chan *chan; - unsigned long flags; - - /* - * Each PCC Subspace is a Mailbox Channel. - * The PCC Clients get their PCC Subspace ID - * from their own tables and pass it here. - * This returns a pointer to the PCC subspace - * for the Client to operate on. - */ - chan = get_pcc_channel(subspace_id); - - if (IS_ERR(chan) || chan->cl) { - dev_err(dev, "Channel not found for idx: %d\n", subspace_id); - return ERR_PTR(-EBUSY); - } - - spin_lock_irqsave(&chan->lock, flags); - chan->msg_free = 0; - chan->msg_count = 0; - chan->active_req = NULL; - chan->cl = cl; - init_completion(&chan->tx_complete); - - if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) - chan->txdone_method |= TXDONE_BY_ACK; - - spin_unlock_irqrestore(&chan->lock, flags); - - return chan; -} -EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); - -/** - * pcc_mbox_free_channel - Clients call this to free their Channel. - * - * @chan: Pointer to the mailbox channel as returned by - * pcc_mbox_request_channel() - */ -void pcc_mbox_free_channel(struct mbox_chan *chan) -{ - unsigned long flags; - - if (!chan || !chan->cl) - return; - - spin_lock_irqsave(&chan->lock, flags); - chan->cl = NULL; - chan->active_req = NULL; - if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) - chan->txdone_method = TXDONE_BY_POLL; - - spin_unlock_irqrestore(&chan->lock, flags); -} -EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); - /* * PCC can be used with perf critical drivers such as CPPC * So it makes sense to locally cache the virtual address and @@ -224,6 +157,167 @@ static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width) return ret_val; } +/** + * pcc_map_interrupt - Map a PCC subspace GSI to a Linux IRQ number + * @interrupt: GSI number. + * @flags: interrupt flags + * + * Returns: a valid Linux IRQ number on success + * 0 or -EINVAL on failure + */ +static int pcc_map_interrupt(u32 interrupt, u32 flags) +{ + int trigger, polarity; + + if (!interrupt) + return 0; + + trigger = (flags & ACPI_PCCT_INTERRUPT_MODE) ? ACPI_EDGE_SENSITIVE + : ACPI_LEVEL_SENSITIVE; + + polarity = (flags & ACPI_PCCT_INTERRUPT_POLARITY) ?
ACPI_ACTIVE_LOW + : ACPI_ACTIVE_HIGH; + + return acpi_register_gsi(NULL, interrupt, trigger, polarity); +} + +/** + * pcc_mbox_irq - PCC mailbox interrupt handler + */ +static irqreturn_t pcc_mbox_irq(int irq, void *p) +{ + struct acpi_generic_address *doorbell_ack; + struct acpi_pcct_hw_reduced *pcct_ss; + struct mbox_chan *chan = p; + u64 doorbell_ack_preserve; + u64 doorbell_ack_write; + u64 doorbell_ack_val; + int ret; + + pcct_ss = chan->con_priv; + + mbox_chan_received_data(chan, NULL); + + if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv; + u32 id = chan - pcc_mbox_channels; + + doorbell_ack = &pcct2_ss->doorbell_ack_register; + doorbell_ack_preserve = pcct2_ss->ack_preserve_mask; + doorbell_ack_write = pcct2_ss->ack_write_mask; + + ret = read_register(pcc_doorbell_ack_vaddr[id], + &doorbell_ack_val, + doorbell_ack->bit_width); + if (ret) + return IRQ_NONE; + + ret = write_register(pcc_doorbell_ack_vaddr[id], + (doorbell_ack_val & doorbell_ack_preserve) + | doorbell_ack_write, + doorbell_ack->bit_width); + if (ret) + return IRQ_NONE; + } + + return IRQ_HANDLED; +} + +/** + * pcc_mbox_request_channel - PCC clients call this function to + * request a pointer to their PCC subspace, from which they + * can get the details of communicating with the remote. + * @cl: Pointer to Mailbox client, so we know where to bind the + * Channel. + * @subspace_id: The PCC Subspace index as parsed in the PCC client + * ACPI package. This is used to lookup the array of PCC + * subspaces as parsed by the PCC Mailbox controller. + * + * Return: Pointer to the Mailbox Channel if successful or + * ERR_PTR. + */ +struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, + int subspace_id) +{ + struct device *dev = pcc_mbox_ctrl.dev; + struct mbox_chan *chan; + unsigned long flags; + + /* + * Each PCC Subspace is a Mailbox Channel. + * The PCC Clients get their PCC Subspace ID + * from their own tables and pass it here. + * This returns a pointer to the PCC subspace + * for the Client to operate on. + */ + chan = get_pcc_channel(subspace_id); + + if (IS_ERR(chan) || chan->cl) { + dev_err(dev, "Channel not found for idx: %d\n", subspace_id); + return ERR_PTR(-EBUSY); + } + + spin_lock_irqsave(&chan->lock, flags); + chan->msg_free = 0; + chan->msg_count = 0; + chan->active_req = NULL; + chan->cl = cl; + init_completion(&chan->tx_complete); + + if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) + chan->txdone_method |= TXDONE_BY_ACK; + + if (pcc_doorbell_irq[subspace_id] > 0) { + int rc; + + rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id], + pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan); + if (unlikely(rc)) { + dev_err(dev, "failed to register PCC interrupt %d\n", + pcc_doorbell_irq[subspace_id]); + chan = ERR_PTR(rc); + } + } + + spin_unlock_irqrestore(&chan->lock, flags); + + return chan; +} +EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); + +/** + * pcc_mbox_free_channel - Clients call this to free their Channel. 
+ * + * @chan: Pointer to the mailbox channel as returned by + * pcc_mbox_request_channel() + */ +void pcc_mbox_free_channel(struct mbox_chan *chan) +{ + u32 id = chan - pcc_mbox_channels; + unsigned long flags; + + if (!chan || !chan->cl) + return; + + if (id >= pcc_mbox_ctrl.num_chans) { + pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n"); + return; + } + + spin_lock_irqsave(&chan->lock, flags); + chan->cl = NULL; + chan->active_req = NULL; + if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) + chan->txdone_method = TXDONE_BY_POLL; + + if (pcc_doorbell_irq[id] > 0) + devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); + + spin_unlock_irqrestore(&chan->lock, flags); +} +EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); + + /** * pcc_send_data - Called from Mailbox Controller code. Used * here only to ring the channel doorbell. The PCC client @@ -296,8 +390,10 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header, if (pcc_mbox_ctrl.num_chans <= MAX_PCC_SUBSPACES) { pcct_ss = (struct acpi_pcct_hw_reduced *) header; - if (pcct_ss->header.type != - ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) { + if ((pcct_ss->header.type != + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE) + && (pcct_ss->header.type != + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2)) { pr_err("Incorrect PCC Subspace type detected\n"); return -EINVAL; } @@ -306,6 +402,43 @@ static int parse_pcc_subspace(struct acpi_subtable_header *header, return 0; } +/** + * pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register + * There should be one entry per PCC client. + * @id: PCC subspace index. + * @pcct_ss: Pointer to the ACPI subtable header under the PCCT. + * + * Return: 0 for Success, else errno. + * + * This gets called for each entry in the PCC table. + */ +static int pcc_parse_subspace_irq(int id, + struct acpi_pcct_hw_reduced *pcct_ss) +{ + pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->doorbell_interrupt, + (u32)pcct_ss->flags); + if (pcc_doorbell_irq[id] <= 0) { + pr_err("PCC GSI %d not registered\n", + pcct_ss->doorbell_interrupt); + return -EINVAL; + } + + if (pcct_ss->header.type + == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) { + struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss; + + pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap( + pcct2_ss->doorbell_ack_register.address, + pcct2_ss->doorbell_ack_register.bit_width / 8); + if (!pcc_doorbell_ack_vaddr[id]) { + pr_err("Failed to ioremap PCC ACK register\n"); + return -ENOMEM; + } + } + + return 0; +} + /** * acpi_pcc_probe - Parse the ACPI tree for the PCCT. * @@ -316,7 +449,9 @@ static int __init acpi_pcc_probe(void) acpi_size pcct_tbl_header_size; struct acpi_table_header *pcct_tbl; struct acpi_subtable_header *pcct_entry; - int count, i; + struct acpi_table_pcct *acpi_pcct_tbl; + int count, i, rc; + int sum = 0; acpi_status status = AE_OK; /* Search for PCCT */ @@ -333,37 +468,66 @@ static int __init acpi_pcc_probe(void) sizeof(struct acpi_table_pcct), ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE, parse_pcc_subspace, MAX_PCC_SUBSPACES); + sum += (count > 0) ? count : 0; - if (count <= 0) { + count = acpi_table_parse_entries(ACPI_SIG_PCCT, + sizeof(struct acpi_table_pcct), + ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2, + parse_pcc_subspace, MAX_PCC_SUBSPACES); + sum += (count > 0) ? 
count : 0; + + if (sum == 0 || sum >= MAX_PCC_SUBSPACES) { pr_err("Error parsing PCC subspaces from PCCT\n"); return -EINVAL; } pcc_mbox_channels = kzalloc(sizeof(struct mbox_chan) * - count, GFP_KERNEL); - + sum, GFP_KERNEL); if (!pcc_mbox_channels) { pr_err("Could not allocate space for PCC mbox channels\n"); return -ENOMEM; } - pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL); + pcc_doorbell_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); if (!pcc_doorbell_vaddr) { - kfree(pcc_mbox_channels); - return -ENOMEM; + rc = -ENOMEM; + goto err_free_mbox; + } + + pcc_doorbell_ack_vaddr = kcalloc(sum, sizeof(void *), GFP_KERNEL); + if (!pcc_doorbell_ack_vaddr) { + rc = -ENOMEM; + goto err_free_db_vaddr; + } + + pcc_doorbell_irq = kcalloc(sum, sizeof(int), GFP_KERNEL); + if (!pcc_doorbell_irq) { + rc = -ENOMEM; + goto err_free_db_ack_vaddr; } /* Point to the first PCC subspace entry */ pcct_entry = (struct acpi_subtable_header *) ( (unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct)); - for (i = 0; i < count; i++) { + acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl; + if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL) + pcc_mbox_ctrl.txdone_irq = true; + + for (i = 0; i < sum; i++) { struct acpi_generic_address *db_reg; struct acpi_pcct_hw_reduced *pcct_ss; pcc_mbox_channels[i].con_priv = pcct_entry; + pcct_ss = (struct acpi_pcct_hw_reduced *) pcct_entry; + + if (pcc_mbox_ctrl.txdone_irq) { + rc = pcc_parse_subspace_irq(i, pcct_ss); + if (rc < 0) + goto err; + } + /* If doorbell is in system memory cache the virt address */ - pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry; db_reg = &pcct_ss->doorbell_register; if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address, @@ -372,11 +536,21 @@ static int __init acpi_pcc_probe(void) ((unsigned long) pcct_entry + pcct_entry->length); } - pcc_mbox_ctrl.num_chans = count; + pcc_mbox_ctrl.num_chans = sum; pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans); return 0; + +err: + kfree(pcc_doorbell_irq); +err_free_db_ack_vaddr: + kfree(pcc_doorbell_ack_vaddr); +err_free_db_vaddr: + kfree(pcc_doorbell_vaddr); +err_free_mbox: + kfree(pcc_mbox_channels); + return rc; } /** diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h index 284965cbc9af..427a7c3e6c75 100644 --- a/include/acpi/cppc_acpi.h +++ b/include/acpi/cppc_acpi.h @@ -24,7 +24,9 @@ #define CPPC_NUM_ENT 21 #define CPPC_REV 2 -#define PCC_CMD_COMPLETE 1 +#define PCC_CMD_COMPLETE_MASK (1 << 0) +#define PCC_ERROR_MASK (1 << 2) + #define MAX_CPC_REG_ENT 19 /* CPPC specific PCC commands. */ @@ -49,6 +51,7 @@ struct cpc_reg { */ struct cpc_register_resource { acpi_object_type type; + u64 __iomem *sys_mem_vaddr; union { struct cpc_reg reg; u64 int_value; @@ -60,8 +63,11 @@ struct cpc_desc { int num_entries; int version; int cpu_id; + int write_cmd_status; + int write_cmd_id; struct cpc_register_resource cpc_regs[MAX_CPC_REG_ENT]; struct acpi_psd_package domain_info; + struct kobject kobj; }; /* These are indexes into the per-cpu cpc_regs[]. Order is important. */ @@ -96,7 +102,6 @@ enum cppc_regs { struct cppc_perf_caps { u32 highest_perf; u32 nominal_perf; - u32 reference_perf; u32 lowest_perf; }; @@ -108,13 +113,13 @@ struct cppc_perf_ctrls { struct cppc_perf_fb_ctrs { u64 reference; - u64 prev_reference; u64 delivered; - u64 prev_delivered; + u64 reference_perf; + u64 ctr_wrap_time; }; /* Per CPU container for runtime CPPC management. 
*/ -struct cpudata { +struct cppc_cpudata { int cpu; struct cppc_perf_caps perf_caps; struct cppc_perf_ctrls perf_ctrls; @@ -127,6 +132,7 @@ struct cpudata { extern int cppc_get_perf_ctrs(int cpu, struct cppc_perf_fb_ctrs *perf_fb_ctrs); extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls); extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps); -extern int acpi_get_psd_map(struct cpudata **); +extern int acpi_get_psd_map(struct cppc_cpudata **); +extern unsigned int cppc_get_transition_latency(int cpu); #endif /* _CPPC_ACPI_H*/
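A note on the FFH accessors added in arch/x86/kernel/acpi/cppc_msr.c: cpc_read_ffh() and cpc_write_ffh() treat the MSR named by reg->address as a container for a bit field described by reg->bit_offset and reg->bit_width. The standalone sketch below mirrors that mask/shift arithmetic outside the kernel, assuming a locally re-derived GENMASK_ULL and an in-memory variable standing in for the MSR; the helper names are illustrative, not part of the patch.

/* Standalone sketch of the cpc_read_ffh()/cpc_write_ffh() bit-field
 * arithmetic. GENMASK_ULL is redefined locally and an in-memory
 * "MSR" replaces rdmsrl_safe_on_cpu()/wrmsrl_safe_on_cpu(), so this
 * compiles and runs anywhere.
 */
#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Extract bit_width bits starting at bit_offset, as cpc_read_ffh() does. */
static uint64_t ffh_read(uint64_t msr, unsigned int bit_offset,
			 unsigned int bit_width)
{
	uint64_t mask = GENMASK_ULL(bit_offset + bit_width - 1, bit_offset);

	return (msr & mask) >> bit_offset;
}

/* Insert val into the same field, preserving all other bits, matching
 * the read-modify-write sequence in cpc_write_ffh().
 */
static uint64_t ffh_write(uint64_t msr, unsigned int bit_offset,
			  unsigned int bit_width, uint64_t val)
{
	uint64_t mask = GENMASK_ULL(bit_offset + bit_width - 1, bit_offset);

	val <<= bit_offset;
	val &= mask;
	msr &= ~mask;
	return msr | val;
}

int main(void)
{
	uint64_t msr = 0x1122334455667788ULL;

	/* An 8-bit field at bit offset 8, as a _CPC entry might describe. */
	printf("read:  %#llx\n",
	       (unsigned long long)ffh_read(msr, 8, 8));	/* 0x77 */
	msr = ffh_write(msr, 8, 8, 0xAB);
	printf("write: %#llx\n", (unsigned long long)msr);
	return 0;
}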
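Since cppc_get_perf_ctrs() now returns raw REFERENCE/DELIVERED counter snapshots together with reference_perf and ctr_wrap_time, rather than maintaining prev_* deltas internally, a caller such as a cpufreq driver is expected to difference two snapshots itself. A minimal consumer-side sketch follows; the field layout matches the patched struct cppc_perf_fb_ctrs, but the helper and its wrap-handling policy are illustrative assumptions, not code from the patch.

/* Illustrative consumer-side delta computation over two counter
 * snapshots. Field names follow the patched struct cppc_perf_fb_ctrs;
 * the helper itself is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

struct cppc_perf_fb_ctrs {
	uint64_t reference;
	uint64_t delivered;
	uint64_t reference_perf;
	uint64_t ctr_wrap_time;
};

/*
 * delivered_perf = reference_perf * delta(delivered) / delta(reference)
 *
 * Unsigned subtraction absorbs a single counter wrap. Per the spec
 * (and the patch, which stores ~0 in ctr_wrap_time when the optional
 * wrap-time register is absent), counters that never wrap need no
 * further handling.
 */
static uint64_t delivered_perf(const struct cppc_perf_fb_ctrs *t0,
			       const struct cppc_perf_fb_ctrs *t1)
{
	uint64_t d_ref = t1->reference - t0->reference;
	uint64_t d_del = t1->delivered - t0->delivered;

	if (!d_ref)	/* no reference cycles elapsed; report base perf */
		return t0->reference_perf;

	return t0->reference_perf * d_del / d_ref;
}

int main(void)
{
	struct cppc_perf_fb_ctrs t0 = { 1000, 1500, 100, ~0ULL };
	struct cppc_perf_fb_ctrs t1 = { 3000, 6500, 100, ~0ULL };

	/* (6500 - 1500) / (3000 - 1000) = 2.5x reference -> 250 */
	printf("delivered perf: %llu\n",
	       (unsigned long long)delivered_perf(&t0, &t1));
	return 0;
}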
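Finally, the arithmetic in cppc_get_transition_latency() takes the worst case of three PCCT-derived timings: the per-command period implied by MPAR (a 60 s window divided by the permitted commands per minute), the nominal command latency, and MRTT, with the latter two converted from microseconds to nanoseconds. A worked example with made-up PCCT values:

/* Worked example of the cppc_get_transition_latency() arithmetic.
 * The PCCT values below are invented for illustration only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int pcc_mpar = 600;	/* max commands per minute */
	unsigned int pcc_nominal = 500;	/* nominal command latency, us */
	unsigned int pcc_mrtt = 60;	/* min turnaround time, us */
	unsigned int latency_ns = 0;

	if (pcc_mpar)	/* 60 s window split across mpar commands, in ns */
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_mpar);

	if (latency_ns < pcc_nominal * 1000)
		latency_ns = pcc_nominal * 1000;
	if (latency_ns < pcc_mrtt * 1000)
		latency_ns = pcc_mrtt * 1000;

	/* 1e9/600 truncates to 1666666, so the MPAR term is 99999960 ns;
	 * it dominates 500000 ns (nominal) and 60000 ns (MRTT).
	 */
	printf("transition latency: %u ns\n", latency_ns);
	return 0;
}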