linux-kernel-cpufreq.c_1

/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 * Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 * Fix handling for CPU hotplug -- affected CPUs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cputime.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/syscore_ops.h>

#include <trace/events/power.h>

/**
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
#endif
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);

/*
 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
 * all cpufreq/hotplug/workqueue/etc related lock issues.
 *
 * The rules for this semaphore:
 * - Any routine that wants to read from the policy structure will
 *   do a down_read on this semaphore.
 * - Any routine that will write to the policy structure and/or may take away
 *   the policy altogether (eg. CPU hotplug), will hold this lock in write
 *   mode before doing so.
 *
 * Additional rules:
 * - Governor routines that can be called in the cpufreq hotplug path should
 *   not take this sem, as the top-level hotplug notifier handler already
 *   takes it.
 * - Lock should not be held across
 *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
 */
static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);

#define lock_policy_rwsem(mode, cpu)     \
static int lock_policy_rwsem_##mode(int cpu)    \
{         \
 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);  \
 BUG_ON(policy_cpu == -1);     \
 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));  \
         \
 return 0;       \
}

lock_policy_rwsem(read, cpu);
lock_policy_rwsem(write, cpu);

#define unlock_policy_rwsem(mode, cpu)     \
static void unlock_policy_rwsem_##mode(int cpu)    \
{         \
 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);  \
 BUG_ON(policy_cpu == -1);     \
 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));  \
}

unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
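
/*
 * Illustrative sketch (not part of the original file): callers use the
 * helpers generated above to bracket policy access, mirroring the rules
 * documented for cpu_policy_rwsem and the pattern used by show()/store()
 * further below, e.g. a hypothetical reader:
 *
 *	if (lock_policy_rwsem_read(cpu) < 0)
 *		return -EINVAL;
 *	policy = per_cpu(cpufreq_cpu_data, cpu);
 *	... read policy->min, policy->max, policy->cur ...
 *	unlock_policy_rwsem_read(cpu);
 */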

/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
  unsigned int event);
static unsigned int __cpufreq_get(unsigned int cpu);
static void handle_update(struct work_struct *work);

/**
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * Each notifier head carries its own internal locking.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
static struct srcu_notifier_head cpufreq_transition_notifier_list;

static bool init_cpufreq_transition_notifier_list_called;
static int __init init_cpufreq_transition_notifier_list(void)
{
 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
 init_cpufreq_transition_notifier_list_called = true;
 return 0;
}
pure_initcall(init_cpufreq_transition_notifier_list);
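
/*
 * Illustrative sketch (assumption, not from this file): other kernel code
 * hooks into the transition list with cpufreq_register_notifier(); the
 * callback names below are hypothetical:
 *
 *	static int my_freq_notify(struct notifier_block *nb,
 *				  unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("cpu%u now at %u kHz\n",
 *				 freqs->cpu, freqs->new);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_freq_nb = {
 *		.notifier_call = my_freq_notify,
 *	};
 *
 *	cpufreq_register_notifier(&my_freq_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */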

static int off __read_mostly;
static int cpufreq_disabled(void)
{
 return off;
}
void disable_cpufreq(void)
{
 off = 1;
}
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
 return cpufreq_driver->have_governor_per_policy;
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
 if (have_governor_per_policy())
  return &policy->kobj;
 else
  return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
 u64 idle_time;
 u64 cur_wall_time;
 u64 busy_time;

 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

 idle_time = cur_wall_time - busy_time;
 if (wall)
  *wall = cputime_to_usecs(cur_wall_time);

 return cputime_to_usecs(idle_time);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

 if (idle_time == -1ULL)
  return get_cpu_idle_time_jiffy(cpu, wall);
 else if (!io_busy)
  idle_time += get_cpu_iowait_time_us(cpu, wall);

 return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
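
/*
 * Illustrative sketch (assumption, not from this file): governors typically
 * sample get_cpu_idle_time() twice and derive a load percentage from the
 * deltas, roughly as below (the zero-delta guard is omitted):
 *
 *	u64 prev_idle, prev_wall, cur_idle, cur_wall;
 *	unsigned int idle_delta, wall_delta, load;
 *
 *	prev_idle = get_cpu_idle_time(cpu, &prev_wall, 0);
 *	... wait one sampling period ...
 *	cur_idle = get_cpu_idle_time(cpu, &cur_wall, 0);
 *
 *	idle_delta = (unsigned int)(cur_idle - prev_idle);
 *	wall_delta = (unsigned int)(cur_wall - prev_wall);
 *	load = 100 * (wall_delta - idle_delta) / wall_delta;
 */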

static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
{
 struct cpufreq_policy *data;
 unsigned long flags;

 if (cpu >= nr_cpu_ids)
  goto err_out;

 /* get the cpufreq driver */
 read_lock_irqsave(&cpufreq_driver_lock, flags);

 if (!cpufreq_driver)
  goto err_out_unlock;

 if (!try_module_get(cpufreq_driver->owner))
  goto err_out_unlock;


 /* get the CPU */
 data = per_cpu(cpufreq_cpu_data, cpu);

 if (!data)
  goto err_out_put_module;

 if (!sysfs && !kobject_get(&data->kobj))
  goto err_out_put_module;

 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
 return data;

err_out_put_module:
 module_put(cpufreq_driver->owner);
err_out_unlock:
 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
err_out:
 return NULL;
}

struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
 if (cpufreq_disabled())
  return NULL;

 return __cpufreq_cpu_get(cpu, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
{
 return __cpufreq_cpu_get(cpu, true);
}

static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
{
 if (!sysfs)
  kobject_put(&data->kobj);
 module_put(cpufreq_driver->owner);
}

void cpufreq_cpu_put(struct cpufreq_policy *data)
{
 if (cpufreq_disabled())
  return;

 __cpufreq_cpu_put(data, false);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);

static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
{
 __cpufreq_cpu_put(data, true);
}
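
/*
 * Illustrative sketch (not part of the original file): the get/put pair
 * brackets any temporary use of a CPU's policy, as cpufreq_add_dev()
 * below does when probing for an already-registered policy:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		... use policy ...
 *		cpufreq_cpu_put(policy);
 *	}
 */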

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
#ifndef CONFIG_SMP
static unsigned long l_p_j_ref;
static unsigned int  l_p_j_ref_freq;

static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
 if (ci->flags & CPUFREQ_CONST_LOOPS)
  return;

 if (!l_p_j_ref_freq) {
  l_p_j_ref = loops_per_jiffy;
  l_p_j_ref_freq = ci->old;
  pr_debug("saving %lu as reference value for loops_per_jiffy; "
   "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
 }
 if ((val == CPUFREQ_POSTCHANGE  && ci->old != ci->new) ||
     (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
  loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
        ci->new);
  pr_debug("scaling loops_per_jiffy to %lu "
   "for frequency %u kHz\n", loops_per_jiffy, ci->new);
 }
}
#else
static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
 return;
}
#endif


void __cpufreq_notify_transition(struct cpufreq_policy *policy,
  struct cpufreq_freqs *freqs, unsigned int state)
{
 BUG_ON(irqs_disabled());

 if (cpufreq_disabled())
  return;

 freqs->flags = cpufreq_driver->flags;
 pr_debug("notification %u of frequency transition to %u kHz\n",
  state, freqs->new);

 switch (state) {

 case CPUFREQ_PRECHANGE:
  /* detect if the driver reported a value as "old frequency"
   * which is not equal to what the cpufreq core thinks is
   * "old frequency".
   */
  if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
   if ((policy) && (policy->cpu == freqs->cpu) &&
       (policy->cur) && (policy->cur != freqs->old)) {
    pr_debug("Warning: CPU frequency is"
     " %u, cpufreq assumed %u kHz.\n",
     freqs->old, policy->cur);
    freqs->old = policy->cur;
   }
  }
  srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
    CPUFREQ_PRECHANGE, freqs);
  adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
  break;

 case CPUFREQ_POSTCHANGE:
  adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
  pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
   (unsigned long)freqs->cpu);
  trace_cpu_frequency(freqs->new, freqs->cpu);
  srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
    CPUFREQ_POSTCHANGE, freqs);
  if (likely(policy) && likely(policy->cpu == freqs->cpu))
   policy->cur = freqs->new;
  break;
 }
}
/**
 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
 * on frequency transition.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
void cpufreq_notify_transition(struct cpufreq_policy *policy,
  struct cpufreq_freqs *freqs, unsigned int state)
{
 if (state == CPUFREQ_POSTCHANGE) {
  trace_cpufreq(policy->cpus, freqs->old,  freqs->new);
 }
 for_each_cpu(freqs->cpu, policy->cpus)
  __cpufreq_notify_transition(policy, freqs, state);
}
EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
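
/*
 * Illustrative sketch (assumption, not from this file): a scaling driver's
 * ->target() callback typically wraps the actual hardware switch in the
 * PRECHANGE/POSTCHANGE pair, which is why this function is called twice
 * per transition:
 *
 *	struct cpufreq_freqs freqs;
 *
 *	freqs.old = policy->cur;
 *	freqs.new = target_freq;
 *
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
 *	... program the new frequency into the hardware ...
 *	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
 */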

 

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static struct cpufreq_governor *__find_governor(const char *str_governor)
{
 struct cpufreq_governor *t;

 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
  if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
   return t;

 return NULL;
}

/**
 * cpufreq_parse_governor - parse a governor string
 */
static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
    struct cpufreq_governor **governor)
{
 int err = -EINVAL;

 if (!cpufreq_driver)
  goto out;

 if (cpufreq_driver->setpolicy) {
  if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
   *policy = CPUFREQ_POLICY_PERFORMANCE;
   err = 0;
  } else if (!strnicmp(str_governor, "powersave",
      CPUFREQ_NAME_LEN)) {
   *policy = CPUFREQ_POLICY_POWERSAVE;
   err = 0;
  }
 } else if (cpufreq_driver->target) {
  struct cpufreq_governor *t;

  mutex_lock(&cpufreq_governor_mutex);

  t = __find_governor(str_governor);

  if (t == NULL) {
   int ret;

   mutex_unlock(&cpufreq_governor_mutex);
   ret = request_module("cpufreq_%s", str_governor);
   mutex_lock(&cpufreq_governor_mutex);

   if (ret == 0)
    t = __find_governor(str_governor);
  }

  if (t != NULL) {
   *governor = t;
   err = 0;
  }

  mutex_unlock(&cpufreq_governor_mutex);
 }
out:
 return err;
}


/**
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)   \
static ssize_t show_##file_name    \
(struct cpufreq_policy *policy, char *buf)  \
{       \
 return sprintf(buf, "%u\n", policy->object); \
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
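
/*
 * For reference (expansion sketch, not additional code): the last line above,
 * show_one(scaling_cur_freq, cur), expands to:
 *
 *	static ssize_t show_scaling_cur_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->cur);
 *	}
 */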

static int __cpufreq_set_policy(struct cpufreq_policy *data,
    struct cpufreq_policy *policy);

/**
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)   \
static ssize_t store_##file_name     \
(struct cpufreq_policy *policy, const char *buf, size_t count)  \
{         \
 unsigned int ret;      \
 struct cpufreq_policy new_policy;    \
         \
 ret = cpufreq_get_policy(&new_policy, policy->cpu);  \
 if (ret)       \
  return -EINVAL;      \
         \
 ret = sscanf(buf, "%u", &new_policy.object);   \
 if (ret != 1)       \
  return -EINVAL;      \
         \
 ret = __cpufreq_set_policy(policy, &new_policy);  \
 policy->user_policy.object = policy->object;   \
         \
 return ret ? ret : count;     \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/**
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
     char *buf)
{
 unsigned int cur_freq = __cpufreq_get(policy->cpu);
 if (!cur_freq)
  return sprintf(buf, "<unknown>");
 return sprintf(buf, "%u\n", cur_freq);
}


/**
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
  return sprintf(buf, "powersave\n");
 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
  return sprintf(buf, "performance\n");
 else if (policy->governor)
  return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
    policy->governor->name);
 return -EINVAL;
}


/**
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
     const char *buf, size_t count)
{
 unsigned int ret;
 char str_governor[16];
 struct cpufreq_policy new_policy;

 ret = cpufreq_get_policy(&new_policy, policy->cpu);
 if (ret)
  return ret;

 ret = sscanf(buf, "%15s", str_governor);
 if (ret != 1)
  return -EINVAL;

 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
      &new_policy.governor))
  return -EINVAL;

 /* Do not use cpufreq_set_policy here or the user_policy.max
    will be wrongly overridden */
 ret = __cpufreq_set_policy(policy, &new_policy);

 policy->user_policy.policy = policy->policy;
 policy->user_policy.governor = policy->governor;

 if (ret)
  return ret;
 else
  return count;
}

/**
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/**
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
      char *buf)
{
 ssize_t i = 0;
 struct cpufreq_governor *t;

 if (!cpufreq_driver->target) {
  i += sprintf(buf, "performance powersave");
  goto out;
 }

 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
  if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
      - (CPUFREQ_NAME_LEN + 2)))
   goto out;
  i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
 }
out:
 i += sprintf(&buf[i], "\n");
 return i;
}

static ssize_t show_cpus(const struct cpumask *mask, char *buf)
{
 ssize_t i = 0;
 unsigned int cpu;

 for_each_cpu(cpu, mask) {
  if (i)
   i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
  i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
  if (i >= (PAGE_SIZE - 5))
   break;
 }
 i += sprintf(&buf[i], "\n");
 return i;
}

/**
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
 return show_cpus(policy->related_cpus, buf);
}

/**
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
 return show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
     const char *buf, size_t count)
{
 unsigned int freq = 0;
 unsigned int ret;

 if (!policy->governor || !policy->governor->store_setspeed)
  return -EINVAL;

 ret = sscanf(buf, "%u", &freq);
 if (ret != 1)
  return -EINVAL;

 policy->governor->store_setspeed(policy, freq);

 return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
 if (!policy->governor || !policy->governor->show_setspeed)
  return sprintf(buf, "<unsupported>\n");

 return policy->governor->show_setspeed(policy, buf);
}

/**
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
 unsigned int limit;
 int ret;
 if (cpufreq_driver->bios_limit) {
  ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
  if (!ret)
   return sprintf(buf, "%u\n", limit);
 }
 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
 &cpuinfo_min_freq.attr,
 &cpuinfo_max_freq.attr,
 &cpuinfo_transition_latency.attr,
 &scaling_min_freq.attr,
 &scaling_max_freq.attr,
 &affected_cpus.attr,
 &related_cpus.attr,
 &scaling_governor.attr,
 &scaling_driver.attr,
 &scaling_available_governors.attr,
 &scaling_setspeed.attr,
 NULL
};
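
/*
 * Illustrative sketch (assumption, not from this file): a scaling driver can
 * append its own attributes to this default set via cpufreq_driver->attr,
 * which cpufreq_add_dev_interface() below installs one by one; the names
 * my_driver_attrs and my_driver are hypothetical:
 *
 *	static struct freq_attr *my_driver_attrs[] = {
 *		&cpufreq_freq_attr_scaling_available_freqs,
 *		NULL,
 *	};
 *
 *	static struct cpufreq_driver my_driver = {
 *		...
 *		.attr = my_driver_attrs,
 *	};
 */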

struct kobject *cpufreq_global_kobject;
EXPORT_SYMBOL(cpufreq_global_kobject);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
 struct cpufreq_policy *policy = to_policy(kobj);
 struct freq_attr *fattr = to_attr(attr);
 ssize_t ret = -EINVAL;
 policy = cpufreq_cpu_get_sysfs(policy->cpu);
 if (!policy)
  goto no_policy;

 if (lock_policy_rwsem_read(policy->cpu) < 0)
  goto fail;

 if (fattr->show)
  ret = fattr->show(policy, buf);
 else
  ret = -EIO;

 unlock_policy_rwsem_read(policy->cpu);
fail:
 cpufreq_cpu_put_sysfs(policy);
no_policy:
 return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
       const char *buf, size_t count)
{
 struct cpufreq_policy *policy = to_policy(kobj);
 struct freq_attr *fattr = to_attr(attr);
 ssize_t ret = -EINVAL;
 policy = cpufreq_cpu_get_sysfs(policy->cpu);
 if (!policy)
  goto no_policy;

 if (lock_policy_rwsem_write(policy->cpu) < 0)
  goto fail;

 if (fattr->store)
  ret = fattr->store(policy, buf, count);
 else
  ret = -EIO;

 unlock_policy_rwsem_write(policy->cpu);
fail:
 cpufreq_cpu_put_sysfs(policy);
no_policy:
 return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
 struct cpufreq_policy *policy = to_policy(kobj);
 pr_debug("last reference is dropped\n");
 complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
 .show = show,
 .store = store,
};

static struct kobj_type ktype_cpufreq = {
 .sysfs_ops = &sysfs_ops,
 .default_attrs = default_attrs,
 .release = cpufreq_sysfs_release,
};

/* symlink affected CPUs */
static int cpufreq_add_dev_symlink(unsigned int cpu,
       struct cpufreq_policy *policy)
{
 unsigned int j;
 int ret = 0;

 for_each_cpu(j, policy->cpus) {
  struct cpufreq_policy *managed_policy;
  struct device *cpu_dev;

  if (j == cpu)
   continue;

  pr_debug("CPU %u already managed, adding link\n", j);
  managed_policy = cpufreq_cpu_get(cpu);
  cpu_dev = get_cpu_device(j);
  ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
     "cpufreq");
  if (ret) {
   cpufreq_cpu_put(managed_policy);
   return ret;
  }
 }
 return ret;
}

static int cpufreq_add_dev_interface(unsigned int cpu,
         struct cpufreq_policy *policy,
         struct device *dev)
{
 struct cpufreq_policy new_policy;
 struct freq_attr **drv_attr;
 unsigned long flags;
 int ret = 0;
 unsigned int j;

 /* prepare interface data */
 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
       &dev->kobj, "cpufreq");
 if (ret)
  return ret;

 /* set up files for this cpu device */
 drv_attr = cpufreq_driver->attr;
 while ((drv_attr) && (*drv_attr)) {
  ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
  if (ret)
   goto err_out_kobj_put;
  drv_attr++;
 }
 if (cpufreq_driver->get) {
  ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
  if (ret)
   goto err_out_kobj_put;
 }
 if (cpufreq_driver->target) {
  ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
  if (ret)
   goto err_out_kobj_put;
 }
 if (cpufreq_driver->bios_limit) {
  ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
  if (ret)
   goto err_out_kobj_put;
 }

 write_lock_irqsave(&cpufreq_driver_lock, flags);
 for_each_cpu(j, policy->cpus) {
  per_cpu(cpufreq_cpu_data, j) = policy;
  per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 }
 write_unlock_irqrestore(&cpufreq_driver_lock, flags);

 ret = cpufreq_add_dev_symlink(cpu, policy);
 if (ret)
  goto err_out_kobj_put;

 memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
 /* assure that the starting sequence is run in __cpufreq_set_policy */
 policy->governor = NULL;

 /* set default policy */
 ret = __cpufreq_set_policy(policy, &new_policy);
 policy->user_policy.policy = policy->policy;
 policy->user_policy.governor = policy->governor;

 if (ret) {
  pr_debug("setting policy failed\n");
  if (cpufreq_driver->exit)
   cpufreq_driver->exit(policy);
 }
 return ret;

err_out_kobj_put:
 kobject_put(&policy->kobj);
 wait_for_completion(&policy->kobj_unregister);
 return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
      struct device *dev)
{
 struct cpufreq_policy *policy;
 int ret = 0, has_target = !!cpufreq_driver->target;
 unsigned long flags;

 policy = cpufreq_cpu_get(sibling);
 WARN_ON(!policy);

 if (has_target)
  __cpufreq_governor(policy, CPUFREQ_GOV_STOP);

 lock_policy_rwsem_write(sibling);

 write_lock_irqsave(&cpufreq_driver_lock, flags);

 cpumask_set_cpu(cpu, policy->cpus);
 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
 per_cpu(cpufreq_cpu_data, cpu) = policy;
 write_unlock_irqrestore(&cpufreq_driver_lock, flags);

 unlock_policy_rwsem_write(sibling);

 if (has_target) {
  __cpufreq_governor(policy, CPUFREQ_GOV_START);
  __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
 }

 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
 if (ret) {
  cpufreq_cpu_put(policy);
  return ret;
 }

 return 0;
}
#endif

#ifdef CONFIG_HISI_RDR
typedef void (*rdr_funcptr_3)(u32, u32, u32);
static rdr_funcptr_3 g_rdr_cpu_on_off_hook;

void rdr_cpu_on_off_hook_add(rdr_funcptr_3 p_hook_func)
{
 g_rdr_cpu_on_off_hook = p_hook_func;
}

void rdr_cpu_on_off_hook_delete(void)
{
 g_rdr_cpu_on_off_hook = NULL;
}
#else
typedef void (*rdr_funcptr_3)(u32, u32, u32);
void rdr_cpu_on_off_hook_add(rdr_funcptr_3 p_hook_func)
{
}

void rdr_cpu_on_off_hook_delete(void)
{
}
#endif

/**
 * cpufreq_add_dev - add a CPU device
 *
 * Adds the cpufreq interface for a CPU device.
 *
 * The Oracle says: try running cpufreq registration/unregistration concurrently
 * with cpu hotplugging and all hell will break loose. Tried to clean this
 * mess up, but more thorough testing is needed. - Mathieu
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
 unsigned int j, cpu = dev->id;
 int ret = -ENOMEM;
 struct cpufreq_policy *policy;
 unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
 struct cpufreq_governor *gov;
 int sibling;
#endif

 if (cpu_is_offline(cpu))
  return 0;

 pr_debug("adding CPU %u\n", cpu);

#ifdef CONFIG_HISI_RDR
 if (g_rdr_cpu_on_off_hook != NULL)
  g_rdr_cpu_on_off_hook(cpu, 0xff, 1);
#endif

#ifdef CONFIG_SMP
 /* check whether a different CPU already registered this
  * CPU because it is in the same boat. */
 policy = cpufreq_cpu_get(cpu);
 if (unlikely(policy)) {
  cpufreq_cpu_put(policy);
  return 0;
 }

#ifdef CONFIG_HOTPLUG_CPU
 /* Check if this cpu was hot-unplugged earlier and has siblings */
 read_lock_irqsave(&cpufreq_driver_lock, flags);
 for_each_online_cpu(sibling) {
  struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
  if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
   read_unlock_irqrestore(&cpufreq_driver_lock, flags);
   return cpufreq_add_policy_cpu(cpu, sibling, dev);
  }
 }
 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
#endif

 if (!try_module_get(cpufreq_driver->owner)) {
  ret = -EINVAL;
  goto module_out;
 }

 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
 if (!policy)
  goto nomem_out;

 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
  goto err_free_policy;

 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
  goto err_free_cpumask;

 policy->cpu = cpu;
 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 cpumask_copy(policy->cpus, cpumask_of(cpu));

 /* Initially set CPU itself as the policy_cpu */
 per_cpu(cpufreq_policy_cpu, cpu) = cpu;

 init_completion(&policy->kobj_unregister);
 INIT_WORK(&policy->update, handle_update);

 /* call driver. From then on the cpufreq driver must be able
  * to accept all calls to ->verify and ->setpolicy for this CPU
  */
 ret = cpufreq_driver->init(policy);
 if (ret) {
  pr_debug("initialization failed\n");
  goto err_set_policy_cpu;
 }

 /* related_cpus should at least contain policy->cpus */
 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);

 /*
  * affected cpus must always be the ones that are online. We aren't
  * managing offline cpus here.
  */
 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

 policy->user_policy.min = policy->min;
 policy->user_policy.max = policy->max;

 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
         CPUFREQ_START, policy);

#ifdef CONFIG_HOTPLUG_CPU
 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
 if (gov) {
  policy->governor = gov;
  pr_debug("Restoring governor %s for cpu %d\n",
         policy->governor->name, cpu);
 }
#endif

 ret = cpufreq_add_dev_interface(cpu, policy, dev);
 if (ret)
  goto err_out_unregister;

 kobject_uevent(&policy->kobj, KOBJ_ADD);
 module_put(cpufreq_driver->owner);
 pr_debug("initialization complete\n");

 return 0;

err_out_unregister:
 write_lock_irqsave(&cpufreq_driver_lock, flags);
 for_each_cpu(j, policy->cpus)
  per_cpu(cpufreq_cpu_data, j) = NULL;
 write_unlock_irqrestore(&cpufreq_driver_lock, flags);

 kobject_put(&policy->kobj);
 wait_for_completion(&policy->kobj_unregister);

err_set_policy_cpu:
 per_cpu(cpufreq_policy_cpu, cpu) = -1;
 free_cpumask_var(policy->related_cpus);
err_free_cpumask:
 free_cpumask_var(policy->cpus);
err_free_policy:
 kfree(policy);
nomem_out:
 module_put(cpufreq_driver->owner);
module_out:
 return ret;
}
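
/*
 * Illustrative sketch (assumption, not from this file): cpufreq_add_dev()
 * above relies on the driver's ->init() callback to fill in the hardware
 * limits (kHz) and, for shared clocks, the cpumasks. A minimal init might
 * look roughly like this, where my_get_hw_freq() is hypothetical:
 *
 *	static int my_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		policy->cpuinfo.min_freq = 200000;
 *		policy->cpuinfo.max_freq = 1200000;
 *		policy->cpuinfo.transition_latency = 300 * 1000;
 *		policy->min = policy->cpuinfo.min_freq;
 *		policy->max = policy->cpuinfo.max_freq;
 *		policy->cur = my_get_hw_freq(policy->cpu);
 *		cpumask_setall(policy->cpus);
 *		return 0;
 *	}
 */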

 
