/* unlocked internal variant */
static inline int pm_qos_get_value(struct pm_qos_constraints *c)
{
struct plist_node *node;
int total_value = 0;
if (plist_head_empty(&c->list))
return c->no_constraint_value;
switch (c->type) {
case PM_QOS_MIN:
return plist_first(&c->list)->prio;
case PM_QOS_MAX:
return plist_last(&c->list)->prio;
case PM_QOS_SUM:
plist_for_each(node, &c->list)
total_value += node->prio;
return total_value;
default:
/* runtime check for not using enum */
BUG();
return PM_QOS_DEFAULT_VALUE;
}
}
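/*
 * Illustrative example: on a PM_QOS_MIN class, requests of 10, 50 and 200
 * aggregate to 10, because the plist is sorted by prio and plist_first()
 * yields the smallest value; on a PM_QOS_MAX class the same three requests
 * aggregate to 200, and on PM_QOS_SUM to 260.
 */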
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
return c->target_value;
}
static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
c->target_value = value;
}
static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
{
struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
struct pm_qos_constraints *c;
struct pm_qos_request *req;
char *type;
unsigned long flags;
int tot_reqs = 0;
int active_reqs = 0;
if (IS_ERR_OR_NULL(qos)) {
pr_err("%s: bad qos param!\n", __func__);
return -EINVAL;
}
c = qos->constraints;
if (IS_ERR_OR_NULL(c)) {
pr_err("%s: Bad constraints on qos?\n", __func__);
return -EINVAL;
}
/* Lock to ensure we have a snapshot */
spin_lock_irqsave(&pm_qos_lock, flags);
if (plist_head_empty(&c->list)) {
seq_puts(s, "Empty!\n");
goto out;
}
switch (c->type) {
case PM_QOS_MIN:
type = "Minimum";
break;
case PM_QOS_MAX:
type = "Maximum";
break;
case PM_QOS_SUM:
type = "Sum";
break;
default:
type = "Unknown";
}
plist_for_each_entry(req, &c->list, node) {
char *state = "Default";
if ((req->node).prio != c->default_value) {
active_reqs++;
state = "Active";
}
tot_reqs++;
seq_printf(s, "%d: %d: %s\n", tot_reqs,
(req->node).prio, state);
}
seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
type, pm_qos_get_value(c), active_reqs, tot_reqs);
out:
spin_unlock_irqrestore(&pm_qos_lock, flags);
return 0;
}
static int pm_qos_dbg_open(struct inode *inode, struct file *file)
{
return single_open(file, pm_qos_dbg_show_requests,
inode->i_private);
}
static const struct file_operations pm_qos_debug_fops = {
.open = pm_qos_dbg_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* pm_qos_update_target - manages the constraints list and calls the notifiers
* if needed
* @c: constraints data struct
* @node: request to add to the list, to update or to remove
* @action: action to take on the constraints list
* @value: value of the request to add or update
*
* This function returns 1 if the aggregated constraint value has changed, 0
* otherwise.
*/
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value)
{
unsigned long flags;
int prev_value, curr_value, new_value;
int ret;
spin_lock_irqsave(&pm_qos_lock, flags);
prev_value = pm_qos_get_value(c);
if (value == PM_QOS_DEFAULT_VALUE)
new_value = c->default_value;
else
new_value = value;
switch (action) {
case PM_QOS_REMOVE_REQ:
plist_del(node, &c->list);
break;
case PM_QOS_UPDATE_REQ:
/*
* to change the list, we atomically remove, reinit
* with new value and add, then see if the extremal
* changed
*/
plist_del(node, &c->list);
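/* fall through */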
case PM_QOS_ADD_REQ:
plist_node_init(node, new_value);
plist_add(node, &c->list);
break;
default:
/* no action */
;
}
curr_value = pm_qos_get_value(c);
pm_qos_set_value(c, curr_value);
spin_unlock_irqrestore(&pm_qos_lock, flags);
trace_pm_qos_update_target(action, prev_value, curr_value);
if (prev_value != curr_value) {
ret = 1;
if (c->notifiers)
blocking_notifier_call_chain(c->notifiers,
(unsigned long)curr_value,
NULL);
} else {
ret = 0;
}
return ret;
}
/**
* pm_qos_flags_remove_req - Remove device PM QoS flags request.
* @pqf: Device PM QoS flags set to remove the request from.
* @req: Request to remove from the set.
*/
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req)
{
s32 val = 0;
list_del(&req->node);
list_for_each_entry(req, &pqf->list, node)
val |= req->flags;
pqf->effective_flags = val;
}
/**
* pm_qos_update_flags - Update a set of PM QoS flags.
* @pqf: Set of flags to update.
* @req: Request to add to the set, to modify, or to remove from the set.
* @action: Action to take on the set.
* @val: Value of the request to add or modify.
*
* Update the given set of PM QoS flags and call notifiers if the aggregate
* value has changed. Returns true if the aggregate constraint value has
* changed, false otherwise.
*/
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val)
{
unsigned long irqflags;
s32 prev_value, curr_value;
spin_lock_irqsave(&pm_qos_lock, irqflags);
prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
switch (action) {
case PM_QOS_REMOVE_REQ:
pm_qos_flags_remove_req(pqf, req);
break;
case PM_QOS_UPDATE_REQ:
pm_qos_flags_remove_req(pqf, req);
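/* fall through */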
case PM_QOS_ADD_REQ:
req->flags = val;
INIT_LIST_HEAD(&req->node);
list_add_tail(&req->node, &pqf->list);
pqf->effective_flags |= val;
break;
default:
/* no action */
;
}
curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
spin_unlock_irqrestore(&pm_qos_lock, irqflags);
trace_pm_qos_update_flags(action, prev_value, curr_value);
return prev_value != curr_value;
}
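/*
 * Illustrative example: effective_flags is the bitwise OR of all requests
 * in the set. With requests carrying 0x1, 0x2 and 0x2 the aggregate is
 * 0x3; a PM_QOS_REMOVE_REQ on the 0x1 request makes
 * pm_qos_flags_remove_req() recompute it to 0x2.
 */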
/**
* pm_qos_request - returns current system wide qos expectation
* @pm_qos_class: identification of which qos value is requested
*
* This function returns the current target value.
*/
int pm_qos_request(int pm_qos_class)
{
return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
int pm_qos_request_active(struct pm_qos_request *req)
{
return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
static void __pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
trace_pm_qos_update_request(req->pm_qos_class, new_value);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
}
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
*
* This cancels the timed-out request by falling back to the default value.
*/
static void pm_qos_work_fn(struct work_struct *work)
{
struct pm_qos_request *req = container_of(to_delayed_work(work),
struct pm_qos_request,
work);
__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
* pm_qos_add_request - inserts new qos request into the list
* @req: pointer to a preallocated handle
* @pm_qos_class: identifies which list of qos request to use
* @value: defines the qos request
*
* This function inserts a new entry in the pm_qos_class list of requested qos
* performance characteristics. It recomputes the aggregate QoS expectations
* for the pm_qos_class of parameters and initializes the pm_qos_request
* handle. Caller needs to save this handle for later use in updates and
* removal.
*/
void pm_qos_add_request(struct pm_qos_request *req,
int pm_qos_class, s32 value)
{
if (!req) /*guard against callers passing in null */
return;
if (pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
return;
}
req->pm_qos_class = pm_qos_class;
INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
trace_pm_qos_add_request(pm_qos_class, value);
pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
&req->node, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
/**
* pm_qos_update_request - modifies an existing qos request
* @req : handle to list element holding a pm_qos request to use
* @value: defines the qos request
*
* Updates an existing qos request for the pm_qos_class of parameters along
* with updating the target pm_qos_class value.
*
* Attempts are made to make this code callable on hot code paths.
*/
void pm_qos_update_request(struct pm_qos_request *req,
s32 new_value)
{
if (!req) /*guard against callers passing in null */
return;
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
/**
* pm_qos_update_request_timeout - modifies an existing qos request temporarily.
* @req : handle to list element holding a pm_qos request to use
* @new_value: defines the temporary qos request
* @timeout_us: the effective duration of this qos request in usecs.
*
* After timeout_us, this qos request is cancelled automatically.
*/
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
unsigned long timeout_us)
{
if (!req)
return;
if (WARN(!pm_qos_request_active(req),
"%s called for unknown object.", __func__))
return;
cancel_delayed_work_sync(&req->work);
trace_pm_qos_update_request_timeout(req->pm_qos_class,
new_value, timeout_us);
if (new_value != req->node.prio)
pm_qos_update_target(
pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_UPDATE_REQ, new_value);
schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
/**
* pm_qos_remove_request - removes an existing qos request
* @req: handle to request list element
*
* Will remove pm qos request from the list of constraints and
* recompute the current target value for the pm_qos_class. Call this
* on slow code paths.
*/
void pm_qos_remove_request(struct pm_qos_request *req)
{
if (!req) /*guard against callers passing in null */
return;
/* silent return to keep pcm code cleaner */
if (!pm_qos_request_active(req)) {
WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
return;
}
cancel_delayed_work_sync(&req->work);
trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
&req->node, PM_QOS_REMOVE_REQ,
PM_QOS_DEFAULT_VALUE);
memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
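/*
 * Usage sketch (illustrative, with arbitrary example values): a driver
 * embeds a pm_qos_request, adds it once, updates it around latency
 * critical sections and removes it on teardown.
 *
 *	static struct pm_qos_request my_req;
 *
 *	pm_qos_add_request(&my_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *	...
 *	pm_qos_update_request(&my_req, 100);
 *	...
 *	pm_qos_update_request_timeout(&my_req, 20, 5 * USEC_PER_MSEC);
 *	...
 *	pm_qos_remove_request(&my_req);
 */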
/**
* pm_qos_add_notifier - sets notification entry for changes to target value
* @pm_qos_class: identifies which qos target changes should be notified.
* @notifier: notifier block managed by caller.
*
* will register the notifier into a notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_register(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
/**
* pm_qos_remove_notifier - deletes notification entry from chain.
* @pm_qos_class: identifies which qos target changes are notified.
* @notifier: notifier block to be removed.
*
* will remove the notifier from the notification chain that gets called
* upon changes to the pm_qos_class target value.
*/
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
{
int retval;
retval = blocking_notifier_chain_unregister(
pm_qos_array[pm_qos_class]->constraints->notifiers,
notifier);
return retval;
}
EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
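/*
 * Notifier usage sketch (illustrative; my_qos_notify and my_qos_nb are
 * hypothetical names): the callback receives the new aggregate target
 * value as the 'val' argument.
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long val, void *data)
 *	{
 *		pr_info("PM QoS target is now %lu\n", val);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_qos_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_qos_nb);
 */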
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
{
qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
qos->pm_qos_power_miscdev.name = qos->name;
qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
if (d) {
(void)debugfs_create_file(qos->name, S_IRUGO, d,
(void *)qos, &pm_qos_debug_fops);
}
return misc_register(&qos->pm_qos_power_miscdev);
}
static int find_pm_qos_object_by_minor(int minor)
{
int pm_qos_class;
for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
if (minor ==
pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
return pm_qos_class;
}
return -1;
}
static int pm_qos_power_open(struct inode *inode, struct file *filp)
{
long pm_qos_class;
pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
filp->private_data = req;
return 0;
}
return -EPERM;
}
static int pm_qos_power_release(struct inode *inode, struct file *filp)
{
struct pm_qos_request *req;
req = filp->private_data;
pm_qos_remove_request(req);
kfree(req);
return 0;
}
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
unsigned long flags;
struct pm_qos_request *req = filp->private_data;
if (!req)
return -EINVAL;
if (!pm_qos_request_active(req))
return -EINVAL;
spin_lock_irqsave(&pm_qos_lock, flags);
value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
spin_unlock_irqrestore(&pm_qos_lock, flags);
return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
s32 value;
struct pm_qos_request *req;
if (count == sizeof(s32)) {
if (copy_from_user(&value, buf, sizeof(s32)))
return -EFAULT;
} else {
int ret;
ret = kstrtos32_from_user(buf, count, 16, &value);
if (ret)
return ret;
}
req = filp->private_data;
pm_qos_update_request(req, value);
return count;
}
static int __init pm_qos_power_init(void)
{
int ret = 0;
int i;
struct dentry *d;
BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
d = debugfs_create_dir("pm_qos", NULL);
if (IS_ERR_OR_NULL(d))
d = NULL;
for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
ret = register_pm_qos_misc(pm_qos_array[i], d);
if (ret < 0) {
printk(KERN_ERR "pm_qos_param: %s setup failed\n",
pm_qos_array[i]->name);
return ret;
}
}
return ret;
}
late_initcall(pm_qos_power_init);
/*
* linux/kernel/power/snapshot.c
*
* This file provides system snapshot/restore functionality for swsusp.
*
* Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*
*/
#include <linux/version.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
#include <linux/console.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include "power.h"
static int swsusp_page_is_free(struct page *);
static void swsusp_set_page_forbidden(struct page *);
static void swsusp_unset_page_forbidden(struct page *);
/*
* Number of bytes to reserve for memory allocations made by device drivers
* from their ->freeze() and ->freeze_noirq() callbacks so that they don't
* cause image creation to fail (tunable via /sys/power/reserved_size).
*/
unsigned long reserved_size;
void __init hibernate_reserved_size_init(void)
{
reserved_size = SPARE_PAGES * PAGE_SIZE;
}
/*
* Preferred image size in bytes (tunable via /sys/power/image_size).
* When it is set to N, swsusp will do its best to ensure the image
* size will not exceed N bytes, but if that is impossible, it will
* try to create the smallest image possible.
*/
unsigned long image_size;
void __init hibernate_image_size_init(void)
{
image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
}
/* List of PBEs needed for restoring the pages that were allocated before
* the suspend and included in the suspend image, but have also been
* allocated by the "resume" kernel, so their contents cannot be written
* directly to their "original" page frames.
*/
struct pbe *restore_pblist;
/* Pointer to an auxiliary buffer (1 page) */
static void *buffer;
/**
* @safe_needed - on resume, for storing the PBE list and the image,
* we can only use memory pages that do not conflict with the pages
* used before suspend. The unsafe pages have PageNosaveFree set
* and we count them using allocated_unsafe_pages.
*
* Each allocated image page is marked as PageNosave and PageNosaveFree
* so that swsusp_free() can release it.
*/
#define PG_ANY 0
#define PG_SAFE 1
#define PG_UNSAFE_CLEAR 1
#define PG_UNSAFE_KEEP 0
static unsigned int allocated_unsafe_pages;
static void *get_image_page(gfp_t gfp_mask, int safe_needed)
{
void *res;
res = (void *)get_zeroed_page(gfp_mask);
if (safe_needed)
while (res && swsusp_page_is_free(virt_to_page(res))) {
/* The page is unsafe, mark it for swsusp_free() */
swsusp_set_page_forbidden(virt_to_page(res));
allocated_unsafe_pages++;
res = (void *)get_zeroed_page(gfp_mask);
}
if (res) {
swsusp_set_page_forbidden(virt_to_page(res));
swsusp_set_page_free(virt_to_page(res));
}
return res;
}
unsigned long get_safe_page(gfp_t gfp_mask)
{
return (unsigned long)get_image_page(gfp_mask, PG_SAFE);
}
static struct page *alloc_image_page(gfp_t gfp_mask)
{
struct page *page;
page = alloc_page(gfp_mask);
if (page) {
swsusp_set_page_forbidden(page);
swsusp_set_page_free(page);
}
return page;
}
/**
* free_image_page - free page represented by @addr, allocated with
* get_image_page (page flags set by it must be cleared)
*/
static inline void free_image_page(void *addr, int clear_nosave_free)
{
struct page *page;
BUG_ON(!virt_addr_valid(addr));
page = virt_to_page(addr);
swsusp_unset_page_forbidden(page);
if (clear_nosave_free)
swsusp_unset_page_free(page);
__free_page(page);
}
/* struct linked_page is used to build chains of pages */
#define LINKED_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(void *))
struct linked_page {
struct linked_page *next;
char data[LINKED_PAGE_DATA_SIZE];
} __packed;
static inline void
free_list_of_pages(struct linked_page *list, int clear_page_nosave)
{
while (list) {
struct linked_page *lp = list->next;
free_image_page(list, clear_page_nosave);
list = lp;
}
}
/**
* struct chain_allocator is used for allocating small objects out of
* a linked list of pages called 'the chain'.
*
* The chain grows each time there is no room for a new object in
* the current page. The allocated objects cannot be freed individually.
* It is only possible to free them all at once, by freeing the entire
* chain.
*
* NOTE: The chain allocator may be inefficient if the allocated objects
* are not much smaller than PAGE_SIZE.
*/
struct chain_allocator {
struct linked_page *chain; /* the chain */
unsigned int used_space; /* total size of objects allocated out
* of the current page
*/
gfp_t gfp_mask; /* mask for allocating pages */
int safe_needed; /* if set, only "safe" pages are allocated */
};
static void
chain_init(struct chain_allocator *ca, gfp_t gfp_mask, int safe_needed)
{
ca->chain = NULL;
ca->used_space = LINKED_PAGE_DATA_SIZE;
ca->gfp_mask = gfp_mask;
ca->safe_needed = safe_needed;
}
static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
{
void *ret;
if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
struct linked_page *lp;
lp = get_image_page(ca->gfp_mask, ca->safe_needed);
if (!lp)
return NULL;
lp->next = ca->chain;
ca->chain = lp;
ca->used_space = 0;
}
ret = ca->chain->data + ca->used_space;
ca->used_space += size;
return ret;
}
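/*
 * Usage sketch (illustrative; 'obj' is a hypothetical object): the chain
 * allocator hands out small objects from the current page and grabs a
 * fresh page when a request does not fit, so per-object overhead is
 * minimal, but nothing can be freed until the whole chain is released.
 *
 *	struct chain_allocator ca;
 *
 *	chain_init(&ca, GFP_KERNEL, PG_ANY);
 *	obj = chain_alloc(&ca, sizeof(*obj));	(may return NULL)
 *	...
 *	free_list_of_pages(ca.chain, PG_UNSAFE_KEEP);
 */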
/**
* Data types related to memory bitmaps.
*
* Memory bitmap is a structure consisting of many linked lists of
* objects. The main list's elements are of type struct zone_bitmap
* and each of them corresponds to one zone. For each zone bitmap
* object there is a list of objects of type struct bm_block that
* represent each block of the bitmap in which information is stored.
*
* struct memory_bitmap contains a pointer to the main list of zone
* bitmap objects, a struct bm_position used for browsing the bitmap,
* and a pointer to the list of pages used for allocating all of the
* zone bitmap objects and bitmap block objects.
*
* NOTE: It has to be possible to lay out the bitmap in memory
* using only allocations of order 0. Additionally, the bitmap is
* designed to work with an arbitrary number of zones (this is over the
* top for now, but let's avoid making unnecessary assumptions ;-).
*
* struct zone_bitmap contains a pointer to a list of bitmap block
* objects and a pointer to the bitmap block object that has been
* most recently used for setting bits. Additionally, it contains the
* pfns that correspond to the start and end of the represented zone.
*
* struct bm_block contains a pointer to the memory page in which
* information is stored (in the form of a block of bitmap)
* It also contains the pfns that correspond to the start and end of
* the represented memory area.
*
* The memory bitmap is organized as a radix tree to guarantee fast random
* access to the bits. There is one radix tree for each zone (as returned
* from create_mem_extents).
*
* One radix tree is represented by one struct mem_zone_bm_rtree. There are
* two linked lists for the nodes of the tree, one for the inner nodes and
* one for the leaf nodes. The linked leaf nodes are used for fast linear
* access of the memory bitmap.
*
* The struct rtree_node represents one node of the radix tree.
*/
#define BM_END_OF_MAP (~0UL)
#define BM_BITS_PER_BLOCK (PAGE_SIZE * BITS_PER_BYTE)
#define BM_BLOCK_SHIFT (PAGE_SHIFT + 3)
#define BM_BLOCK_MASK ((1UL << BM_BLOCK_SHIFT) - 1)
/*
* struct rtree_node is a wrapper struct to link the nodes
* of the rtree together for easy linear iteration over
* bits and easy freeing
*/
struct rtree_node {
struct list_head list;
unsigned long *data;
};
/*
* struct mem_zone_bm_rtree represents a bitmap used for one
* populated memory zone.
*/
struct mem_zone_bm_rtree {
struct list_head list; /* Link Zones together */
struct list_head nodes; /* Radix Tree inner nodes */
struct list_head leaves; /* Radix Tree leaves */
unsigned long start_pfn; /* Zone start page frame */
unsigned long end_pfn; /* Zone end page frame + 1 */
struct rtree_node *rtree; /* Radix Tree Root */
int levels; /* Number of Radix Tree Levels */
unsigned int blocks; /* Number of Bitmap Blocks */
};
/* struct bm_position is used for browsing memory bitmaps */
struct bm_position {
struct mem_zone_bm_rtree *zone;
struct rtree_node *node;
unsigned long node_pfn;
int node_bit;
};
struct memory_bitmap {
struct list_head zones;
struct linked_page *p_list; /* list of pages used to store zone
* bitmap objects and bitmap block
* objects
*/
struct bm_position cur; /* most recently used bit position */
};
/* Functions that operate on memory bitmaps */
#define BM_ENTRIES_PER_LEVEL (PAGE_SIZE / sizeof(unsigned long))
#if BITS_PER_LONG == 32
#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 2)
#else
#define BM_RTREE_LEVEL_SHIFT (PAGE_SHIFT - 3)
#endif
#define BM_RTREE_LEVEL_MASK ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
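/*
 * Worked example (illustrative): with 4 KiB pages and 64-bit longs,
 * BM_BLOCK_SHIFT = 12 + 3 = 15, so one leaf node covers
 * BM_BITS_PER_BLOCK = 32768 pfns, and each inner node holds
 * BM_ENTRIES_PER_LEVEL = 512 entries (BM_RTREE_LEVEL_SHIFT = 9).
 */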
/*
* alloc_rtree_node - Allocate a new node and add it to the radix tree.
*
* This function is used to allocate inner nodes as well as the
* leaf nodes of the radix tree. It also adds the node to the
* corresponding linked list passed in by the *list parameter.
*/
static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
struct chain_allocator *ca,
struct list_head *list)
{
struct rtree_node *node;
node = chain_alloc(ca, sizeof(struct rtree_node));
if (!node)
return NULL;
node->data = get_image_page(gfp_mask, safe_needed);
if (!node->data)
return NULL;
list_add_tail(&node->list, list);
return node;
}
/*
* add_rtree_block - Add a new leaf node to the radix tree
*
* The leaf nodes need to be allocated in order to keep the leaves
* linked list in order. This is guaranteed by the zone->blocks
* counter.
*/
static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
int safe_needed, struct chain_allocator *ca)
{
struct rtree_node *node, *block, **dst;
unsigned int levels_needed, block_nr;
int i;
block_nr = zone->blocks;
levels_needed = 0;
/* How many levels do we need for this block nr? */
while (block_nr) {
levels_needed += 1;
block_nr >>= BM_RTREE_LEVEL_SHIFT;
}
/* Make sure the rtree has enough levels */
for (i = zone->levels; i < levels_needed; i++) {
node = alloc_rtree_node(gfp_mask, safe_needed, ca,
&zone->nodes);
if (!node)
return -ENOMEM;
node->data[0] = (unsigned long)zone->rtree;
zone->rtree = node;
zone->levels += 1;
}
/* Allocate new block */
block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
if (!block)
return -ENOMEM;
/* Now walk the rtree to insert the block */
node = zone->rtree;
dst = &zone->rtree;
block_nr = zone->blocks;
for (i = zone->levels; i > 0; i--) {
int index;
if (!node) {
node = alloc_rtree_node(gfp_mask, safe_needed, ca,
&zone->nodes);
if (!node)
return -ENOMEM;
*dst = node;
}
index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
index &= BM_RTREE_LEVEL_MASK;
dst = (struct rtree_node **)&((*dst)->data[index]);
node = *dst;
}
zone->blocks += 1;
*dst = block;
return 0;
}
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
int clear_nosave_free);
/*
* create_zone_bm_rtree - create a radix tree for one zone
*
* Allocates the mem_zone_bm_rtree structure and initializes it.
* This function also allocates and builds the radix tree for the
* zone.
*/
static struct mem_zone_bm_rtree *
create_zone_bm_rtree(gfp_t gfp_mask, int safe_needed,
struct chain_allocator *ca,
unsigned long start, unsigned long end)
{
struct mem_zone_bm_rtree *zone;
unsigned int i, nr_blocks;
unsigned long pages;
pages = end - start;
zone = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
if (!zone)
return NULL;
INIT_LIST_HEAD(&zone->nodes);
INIT_LIST_HEAD(&zone->leaves);
zone->start_pfn = start;
zone->end_pfn = end;
nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
for (i = 0; i < nr_blocks; i++) {
if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
return NULL;
}
}
return zone;
}
/*
* free_zone_bm_rtree - Free the memory of the radix tree
*
* Free all node pages of the radix tree. The mem_zone_bm_rtree
* structure itself is not freed here nor are the rtree_node
* structs.
*/
static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
int clear_nosave_free)
{
struct rtree_node *node;
list_for_each_entry(node, &zone->nodes, list)
free_image_page(node->data, clear_nosave_free);
list_for_each_entry(node, &zone->leaves, list)
free_image_page(node->data, clear_nosave_free);
}
static void memory_bm_position_reset(struct memory_bitmap *bm)
{
bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
list);
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
bm->cur.node_bit = 0;
}
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
struct mem_extent {
struct list_head hook;
unsigned long start;
unsigned long end;
};
/**
* free_mem_extents - free a list of memory extents
* @list - list of extents to empty
*/
static void free_mem_extents(struct list_head *list)
{
struct mem_extent *ext, *aux;
list_for_each_entry_safe(ext, aux, list, hook) {
list_del(&ext->hook);
kfree(ext);
}
}
/**
* create_mem_extents - create a list of memory extents representing
* contiguous ranges of PFNs
* @list - list to put the extents into
* @gfp_mask - mask to use for memory allocations
*/
static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
{
struct zone *zone;
INIT_LIST_HEAD(list);
for_each_populated_zone(zone) {
unsigned long zone_start, zone_end;
struct mem_extent *ext, *cur, *aux;
zone_start = zone->zone_start_pfn;
zone_end = zone_end_pfn(zone);
list_for_each_entry(ext, list, hook)
if (zone_start <= ext->end)
break;
if (&ext->hook == list || zone_end < ext->start) {
/* New extent is necessary */
struct mem_extent *new_ext;
new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
if (!new_ext) {
free_mem_extents(list);
return -ENOMEM;
}
new_ext->start = zone_start;
new_ext->end = zone_end;
list_add_tail(&new_ext->hook, &ext->hook);
continue;
}
/* Merge this zone's range of PFNs with the existing one */
if (zone_start < ext->start)
ext->start = zone_start;
if (zone_end > ext->end)
ext->end = zone_end;
/* More merging may be possible */
cur = ext;
list_for_each_entry_safe_continue(cur, aux, list, hook) {
if (zone_end < cur->start)
break;
if (zone_end < cur->end)
ext->end = cur->end;
list_del(&cur->hook);
kfree(cur);
}
}
return 0;
}
/**
* memory_bm_create - allocate memory for a memory bitmap
*/
static int
memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask, int safe_needed)
{
struct chain_allocator ca;
struct list_head mem_extents;
struct mem_extent *ext;
int error;
chain_init(&ca, gfp_mask, safe_needed);
INIT_LIST_HEAD(&bm->zones);
error = create_mem_extents(&mem_extents, gfp_mask);
if (error)
return error;
list_for_each_entry(ext, &mem_extents, hook) {
struct mem_zone_bm_rtree *zone;
zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
ext->start, ext->end);
if (!zone) {
error = -ENOMEM;
goto Error;
}
list_add_tail(&zone->list, &bm->zones);
}
bm->p_list = ca.chain;
memory_bm_position_reset(bm);
Exit:
free_mem_extents(&mem_extents);
return error;
Error:
bm->p_list = ca.chain;
memory_bm_free(bm, PG_UNSAFE_CLEAR);
goto Exit;
}
/**
* memory_bm_free - free memory occupied by the memory bitmap @bm
*/
static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
{
struct mem_zone_bm_rtree *zone;
list_for_each_entry(zone, &bm->zones, list)
free_zone_bm_rtree(zone, clear_nosave_free);
free_list_of_pages(bm->p_list, clear_nosave_free);
INIT_LIST_HEAD(&bm->zones);
}
/**
* memory_bm_find_bit - Find the bit for pfn in the memory
* bitmap
*
* Find the bit in the bitmap @bm that corresponds to the given pfn.
* The cur.zone, cur.node and cur.node_pfn members of @bm are
* updated.
* It walks the radix tree to find the page which contains the bit for
* pfn and returns the page address in *addr and the bit position in
* *bit_nr.
*/
static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
void **addr, unsigned int *bit_nr)
{
struct mem_zone_bm_rtree *curr, *zone;
struct rtree_node *node;
int i, block_nr;
zone = bm->cur.zone;
if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
goto zone_found;
zone = NULL;
/* Find the right zone */
list_for_each_entry(curr, &bm->zones, list) {
if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
zone = curr;
break;
}
}
if (!zone)
return -EFAULT;
zone_found:
/*
* We have a zone. Now walk the radix tree to find the leaf
* node for our pfn.
*/
node = bm->cur.node;
if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
goto node_found;
node = zone->rtree;
block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
for (i = zone->levels; i > 0; i--) {
int index;
index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
index &= BM_RTREE_LEVEL_MASK;
BUG_ON(node->data[index] == 0);
node = (struct rtree_node *)node->data[index];
}
node_found:
/* Update last position */
bm->cur.zone = zone;
bm->cur.node = node;
bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
/* Set return values */
*addr = node->data;
*bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
return 0;
}
static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
set_bit(bit, addr);
}
static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
if (!error)
set_bit(bit, addr);
return error;
}
static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
clear_bit(bit, addr);
}
static void memory_bm_clear_current(struct memory_bitmap *bm)
{
int bit;
bit = max(bm->cur.node_bit - 1, 0);
clear_bit(bit, bm->cur.node->data);
}
static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
int error;
error = memory_bm_find_bit(bm, pfn, &addr, &bit);
BUG_ON(error);
return test_bit(bit, addr);
}
static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
{
void *addr;
unsigned int bit;
return !memory_bm_find_bit(bm, pfn, &addr, &bit);
}
/*
* rtree_next_node - Jumps to the next leaf node
*
* Sets the position to the beginning of the next node in the
* memory bitmap. This is either the next node in the current
* zone's radix tree or the first node in the radix tree of the
* next zone.
*
* Returns true if there is a next node, false otherwise.
*/
static bool rtree_next_node(struct memory_bitmap *bm)
{
bm->cur.node = list_entry(bm->cur.node->list.next,
struct rtree_node, list);
if (&bm->cur.node->list != &bm->cur.zone->leaves) {
bm->cur.node_pfn += BM_BITS_PER_BLOCK;
bm->cur.node_bit = 0;
touch_softlockup_watchdog();
return true;
}
/* No more nodes, goto next zone */
bm->cur.zone = list_entry(bm->cur.zone->list.next,
struct mem_zone_bm_rtree, list);
if (&bm->cur.zone->list != &bm->zones) {
bm->cur.node = list_entry(bm->cur.zone->leaves.next,
struct rtree_node, list);
bm->cur.node_pfn = 0;
bm->cur.node_bit = 0;
return true;
}
/* No more zones */
return false;
}
/**
* memory_bm_next_pfn - Find the next set bit in the bitmap @bm
*
* Starting from the last returned position this function searches
* for the next set bit in the memory bitmap and returns its
* number. If no more bits are set, BM_END_OF_MAP is returned.
*
* It is required to run memory_bm_position_reset() before the
* first call to this function.
*/
static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
{
unsigned long bits, pfn, pages;
int bit;
do {
pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
bit = find_next_bit(bm->cur.node->data, bits,
bm->cur.node_bit);
if (bit < bits) {
pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
bm->cur.node_bit = bit + 1;
return pfn;
}
} while (rtree_next_node(bm));
return BM_END_OF_MAP;
}
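/*
 * Iteration sketch (illustrative; do_something_with() is a hypothetical
 * helper): walking all set bits follows the same idiom swsusp_free()
 * uses below.
 *
 *	unsigned long pfn;
 *
 *	memory_bm_position_reset(bm);
 *	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
 *	     pfn = memory_bm_next_pfn(bm))
 *		do_something_with(pfn);
 */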
/**
* This structure represents a range of page frames the contents of which
* should not be saved during the suspend.
*/
struct nosave_region {
struct list_head list;
unsigned long start_pfn;
unsigned long end_pfn;
};
static LIST_HEAD(nosave_regions);
/**
* __register_nosave_region - register a range of page frames the contents
* of which should not be saved during the suspend (to be used in the early
* initialization code)
*/
void __init
__register_nosave_region(unsigned long start_pfn, unsigned long end_pfn,
int use_kmalloc)
{
struct nosave_region *region;
if (start_pfn >= end_pfn)
return;
if (!list_empty(&nosave_regions)) {
/* Try to extend the previous region (they should be sorted) */
region = list_entry(nosave_regions.prev,
struct nosave_region, list);
if (region->end_pfn == start_pfn) {
region->end_pfn = end_pfn;
goto Report;
}
}
if (use_kmalloc) {
/* during init, this shouldn't fail */
region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
BUG_ON(!region);
} else
/* This allocation cannot fail */
region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
Report:
printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
(unsigned long long) start_pfn << PAGE_SHIFT,
((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
}
/*
* Set bits in this map correspond to the page frames the contents of which
* should not be saved during the suspend.
*/
static struct memory_bitmap *forbidden_pages_map;
/* Set bits in this map correspond to free page frames. */
static struct memory_bitmap *free_pages_map;
/*
* Each page frame allocated for creating the image is marked by setting the
* corresponding bits in forbidden_pages_map and free_pages_map simultaneously
*/
void swsusp_set_page_free(struct page *page)
{
if (free_pages_map)
memory_bm_set_bit(free_pages_map, page_to_pfn(page));
}
static int swsusp_page_is_free(struct page *page)
{
return free_pages_map ?
memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
}
void swsusp_unset_page_free(struct page *page)
{
if (free_pages_map)
memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
}
static void swsusp_set_page_forbidden(struct page *page)
{
if (forbidden_pages_map)
memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
}
int swsusp_page_is_forbidden(struct page *page)
{
return forbidden_pages_map ?
memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
}
static void swsusp_unset_page_forbidden(struct page *page)
{
if (forbidden_pages_map)
memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
}
/**
* mark_nosave_pages - set bits corresponding to the page frames the
* contents of which should not be saved in a given bitmap.
*/
static void mark_nosave_pages(struct memory_bitmap *bm)
{
struct nosave_region *region;
if (list_empty(&nosave_regions))
return;
list_for_each_entry(region, &nosave_regions, list) {
unsigned long pfn;
pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
(unsigned long long) region->start_pfn << PAGE_SHIFT,
((unsigned long long) region->end_pfn << PAGE_SHIFT)
- 1);
for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
if (pfn_valid(pfn)) {
/*
* It is safe to ignore the result of
* mem_bm_set_bit_check() here, since we won't
* touch the PFNs for which the error is
* returned anyway.
*/
mem_bm_set_bit_check(bm, pfn);
}
}
}
/**
* create_basic_memory_bitmaps - create bitmaps needed for marking page
* frames that should not be saved and free page frames. The pointers
* forbidden_pages_map and free_pages_map are only modified if everything
* goes well, because we don't want the bits to be used before both bitmaps
* are set up.
*/
int create_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
int error = 0;
if (forbidden_pages_map && free_pages_map)
return 0;
else
BUG_ON(forbidden_pages_map || free_pages_map);
bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
if (!bm1)
return -ENOMEM;
error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
if (error)
goto Free_first_object;
bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
if (!bm2)
goto Free_first_bitmap;
error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
if (error)
goto Free_second_object;
forbidden_pages_map = bm1;
free_pages_map = bm2;
mark_nosave_pages(forbidden_pages_map);
pr_debug("PM: Basic memory bitmaps created\n");
return 0;
Free_second_object:
kfree(bm2);
Free_first_bitmap:
memory_bm_free(bm1, PG_UNSAFE_CLEAR);
Free_first_object:
kfree(bm1);
return -ENOMEM;
}
/**
* free_basic_memory_bitmaps - free memory bitmaps allocated by
* create_basic_memory_bitmaps(). The auxiliary pointers are necessary
* so that the bitmaps themselves are not referred to while they are being
* freed.
*/
void free_basic_memory_bitmaps(void)
{
struct memory_bitmap *bm1, *bm2;
if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
return;
bm1 = forbidden_pages_map;
bm2 = free_pages_map;
forbidden_pages_map = NULL;
free_pages_map = NULL;
memory_bm_free(bm1, PG_UNSAFE_CLEAR);
kfree(bm1);
memory_bm_free(bm2, PG_UNSAFE_CLEAR);
kfree(bm2);
pr_debug("PM: Basic memory bitmaps freed\n");
}
/**
* snapshot_additional_pages - estimate the number of additional pages
* that will be needed for setting up the suspend image data structures for
* a given zone (usually the returned value is greater than the exact number)
*/
unsigned int snapshot_additional_pages(struct zone *zone)
{
unsigned int rtree, nodes;
rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
LINKED_PAGE_DATA_SIZE);
while (nodes > 1) {
nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
rtree += nodes;
}
return 2 * rtree;
}
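/*
 * Worked example (illustrative, 64-bit, 4 KiB pages): a zone spanning
 * 262144 pages (1 GiB) needs nodes = DIV_ROUND_UP(262144, 32768) = 8 leaf
 * blocks; packing the 8 rtree_node structs adds one chain page and the
 * single inner level (DIV_ROUND_UP(8, 512) = 1) adds another, so the
 * estimate is 2 * (8 + 1 + 1) = 20 pages, the factor of 2 covering the
 * two memory bitmaps.
 */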
#ifdef CONFIG_HIGHMEM
/**
* count_free_highmem_pages - compute the total number of free highmem
* pages, system-wide.
*/
static unsigned int count_free_highmem_pages(void)
{
struct zone *zone;
unsigned int cnt = 0;
for_each_populated_zone(zone)
if (is_highmem(zone))
cnt += zone_page_state(zone, NR_FREE_PAGES);
return cnt;
}
/**
* saveable_highmem_page - Determine whether a highmem page should be
* included in the suspend image.
*
* We should save the page if it isn't Nosave or NosaveFree, or Reserved,
* and it isn't a part of a free chunk of pages.
*/
static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
if (!pfn_valid(pfn))
return NULL;
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
return NULL;
BUG_ON(!PageHighMem(page));
if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
PageReserved(page))
return NULL;
if (page_is_guard(page))
return NULL;
return page;
}
/**
* count_highmem_pages - compute the total number of saveable highmem
* pages.
*/
static unsigned int count_highmem_pages(void)
{
struct zone *zone;
unsigned int n = 0;
for_each_populated_zone(zone) {
unsigned long pfn, max_zone_pfn;
if (!is_highmem(zone))
continue;
mark_free_pages(zone);
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_highmem_page(zone, pfn))
n++;
}
return n;
}
#else
static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
{
return NULL;
}
#endif /* CONFIG_HIGHMEM */
/**
* saveable_page - Determine whether a non-highmem page should be included
* in the suspend image.
*
* We should save the page if it isn't Nosave, and is not in the range
* of pages statically defined as 'unsaveable', and it isn't a part of
* a free chunk of pages.
*/
static struct page *saveable_page(struct zone *zone, unsigned long pfn)
{
struct page *page;
if (!pfn_valid(pfn))
return NULL;
page = pfn_to_page(pfn);
if (page_zone(page) != zone)
return NULL;
BUG_ON(PageHighMem(page));
if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
return NULL;
if (PageReserved(page)
&& (!kernel_page_present(page) || pfn_is_nosave(pfn)))
return NULL;
if (page_is_guard(page))
return NULL;
return page;
}
/**
* count_data_pages - compute the total number of saveable non-highmem
* pages.
*/
static unsigned int count_data_pages(void)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
unsigned int n = 0;
for_each_populated_zone(zone) {
if (is_highmem(zone))
continue;
mark_free_pages(zone);
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (saveable_page(zone, pfn))
n++;
}
return n;
}
/* This is needed, because copy_page and memcpy are not usable for copying
* task structs.
*/
static inline void do_copy_page(long *dst, long *src)
{
int n;
for (n = PAGE_SIZE / sizeof(long); n; n--)
*dst++ = *src++;
}
/**
* safe_copy_page - check if the page we are going to copy is marked as
* present in the kernel page tables (this always is the case if
* CONFIG_DEBUG_PAGEALLOC is not set and in that case
* kernel_page_present() always returns 'true').
*/
static void safe_copy_page(void *dst, struct page *s_page)
{
if (kernel_page_present(s_page)) {
do_copy_page(dst, page_address(s_page));
} else {
kernel_map_pages(s_page, 1, 1);
do_copy_page(dst, page_address(s_page));
kernel_map_pages(s_page, 1, 0);
}
}
#ifdef CONFIG_HIGHMEM
static inline struct page *
page_is_saveable(struct zone *zone, unsigned long pfn)
{
return is_highmem(zone) ?
saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
}
static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
struct page *s_page, *d_page;
void *src, *dst;
s_page = pfn_to_page(src_pfn);
d_page = pfn_to_page(dst_pfn);
if (PageHighMem(s_page)) {
src = kmap_atomic(s_page);
dst = kmap_atomic(d_page);
do_copy_page(dst, src);
kunmap_atomic(dst);
kunmap_atomic(src);
} else {
if (PageHighMem(d_page)) {
/* Page pointed to by src may contain some kernel
* data modified by kmap_atomic()
*/
safe_copy_page(buffer, s_page);
dst = kmap_atomic(d_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
} else {
safe_copy_page(page_address(d_page), s_page);
}
}
}
#else
#define page_is_saveable(zone, pfn) saveable_page(zone, pfn)
static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
{
safe_copy_page(page_address(pfn_to_page(dst_pfn)),
pfn_to_page(src_pfn));
}
#endif /* CONFIG_HIGHMEM */
static void
copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm)
{
struct zone *zone;
unsigned long pfn;
for_each_populated_zone(zone) {
unsigned long max_zone_pfn;
mark_free_pages(zone);
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (page_is_saveable(zone, pfn))
memory_bm_set_bit(orig_bm, pfn);
}
memory_bm_position_reset(orig_bm);
memory_bm_position_reset(copy_bm);
for (;;) {
pfn = memory_bm_next_pfn(orig_bm);
if (unlikely(pfn == BM_END_OF_MAP))
break;
copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
}
}
/* Total number of image pages */
static unsigned int nr_copy_pages;
/* Number of pages needed for saving the original pfns of the image pages */
static unsigned int nr_meta_pages;
/*
* Numbers of normal and highmem page frames allocated for hibernation image
* before suspending devices.
*/
unsigned int alloc_normal, alloc_highmem;
/*
* Memory bitmap used for marking saveable pages (during hibernation) or
* hibernation image pages (during restore)
*/
static struct memory_bitmap orig_bm;
/*
* Memory bitmap used during hibernation for marking allocated page frames that
* will contain copies of saveable pages. During restore it is initially used
* for marking hibernation image pages, but then the set bits from it are
* duplicated in @orig_bm and it is released. On highmem systems it is next
* used for marking "safe" highmem pages, but it has to be reinitialized for
* this purpose.
*/
static struct memory_bitmap copy_bm;
/**
* swsusp_free - free pages allocated for the suspend.
*
* Suspend pages are allocated before the atomic copy is made, so we
* need to release them after the resume.
*/
void swsusp_free(void)
{
unsigned long fb_pfn, fr_pfn;
if (!forbidden_pages_map || !free_pages_map)
goto out;
memory_bm_position_reset(forbidden_pages_map);
memory_bm_position_reset(free_pages_map);
loop:
fr_pfn = memory_bm_next_pfn(free_pages_map);
fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
/*
* Find the next bit set in both bitmaps. This is guaranteed to
* terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
*/
do {
if (fb_pfn < fr_pfn)
fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
if (fr_pfn < fb_pfn)
fr_pfn = memory_bm_next_pfn(free_pages_map);
} while (fb_pfn != fr_pfn);
if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
struct page *page = pfn_to_page(fr_pfn);
memory_bm_clear_current(forbidden_pages_map);
memory_bm_clear_current(free_pages_map);
__free_page(page);
goto loop;
}
out:
nr_copy_pages = 0;
nr_meta_pages = 0;
restore_pblist = NULL;
buffer = NULL;
alloc_normal = 0;
alloc_highmem = 0;
}
/* Helper functions used for the shrinking of memory. */
#define GFP_IMAGE (GFP_KERNEL | __GFP_NOWARN)
/**
* preallocate_image_pages - Allocate a number of pages for hibernation image
* @nr_pages: Number of page frames to allocate.
* @mask: GFP flags to use for the allocation.
*
* Return value: Number of page frames actually allocated
*/
static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
{
unsigned long nr_alloc = 0;
while (nr_pages > 0) {
struct page *page;
page = alloc_image_page(mask);
if (!page)
break;
memory_bm_set_bit(&copy_bm, page_to_pfn(page));
if (PageHighMem(page))
alloc_highmem++;
else
alloc_normal++;
nr_pages--;
nr_alloc++;
}
return nr_alloc;
}
static unsigned long preallocate_image_memory(unsigned long nr_pages,
unsigned long avail_normal)
{
unsigned long alloc;
if (avail_normal <= alloc_normal)
return 0;
alloc = avail_normal - alloc_normal;
if (nr_pages < alloc)
alloc = nr_pages;
return preallocate_image_pages(alloc, GFP_IMAGE);
}
#ifdef CONFIG_HIGHMEM
static unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
}
/**
* __fraction - Compute (an approximation of) x * (multiplier / base)
*/
static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
{
x *= multiplier;
do_div(x, base);
return (unsigned long)x;
}
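/*
 * Example (illustrative): __fraction(1000, 3, 4) evaluates 1000 * 3 / 4 =
 * 750 using a 64-bit intermediate, which is how
 * preallocate_highmem_fraction() scales nr_pages by highmem / total
 * without overflow.
 */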
static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
unsigned long alloc = __fraction(nr_pages, highmem, total);
return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
}
#else /* CONFIG_HIGHMEM */
static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
{
return 0;
}
static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
unsigned long highmem,
unsigned long total)
{
return 0;
}
#endif /* CONFIG_HIGHMEM */
/**
* free_unnecessary_pages - Release preallocated pages not needed for the image
*/
static unsigned long free_unnecessary_pages(void)
{
unsigned long save, to_free_normal, to_free_highmem, free;
save = count_data_pages();
if (alloc_normal >= save) {
to_free_normal = alloc_normal - save;
save = 0;
} else {
to_free_normal = 0;
save -= alloc_normal;
}
save += count_highmem_pages();
if (alloc_highmem >= save) {
to_free_highmem = alloc_highmem - save;
} else {
to_free_highmem = 0;
save -= alloc_highmem;
if (to_free_normal > save)
to_free_normal -= save;
else
to_free_normal = 0;
}
free = to_free_normal + to_free_highmem;
memory_bm_position_reset(&copy_bm);
while (to_free_normal > 0 || to_free_highmem > 0) {
unsigned long pfn = memory_bm_next_pfn(&copy_bm);
struct page *page = pfn_to_page(pfn);
if (PageHighMem(page)) {
if (!to_free_highmem)
continue;
to_free_highmem--;
alloc_highmem--;
} else {
if (!to_free_normal)
continue;
to_free_normal--;
alloc_normal--;
}
memory_bm_clear_bit(&copy_bm, pfn);
swsusp_unset_page_forbidden(page);
swsusp_unset_page_free(page);
__free_page(page);
}
return free;
}
/**
* minimum_image_size - Estimate the minimum acceptable size of an image
* @saveable: Number of saveable pages in the system.
*
* We want to avoid attempting to free too much memory too hard, so estimate the
* minimum acceptable size of a hibernation image to use as the lower limit for
* preallocating memory.
*
* We assume that the minimum image size should be proportional to
*
* [number of saveable pages] - [number of pages that can be freed in theory]
*
* where the second term is the sum of (1) reclaimable slab pages, (2) active
* and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
* minus mapped file pages.
*/
static unsigned long minimum_image_size(unsigned long saveable)
{
unsigned long size;
size = global_page_state(NR_SLAB_RECLAIMABLE)
+ global_page_state(NR_ACTIVE_ANON)
+ global_page_state(NR_INACTIVE_ANON)
+ global_page_state(NR_ACTIVE_FILE)
+ global_page_state(NR_INACTIVE_FILE)
- global_page_state(NR_FILE_MAPPED);
return saveable <= size ? 0 : saveable - size;
}
/**
* hibernate_preallocate_memory - Preallocate memory for hibernation image
*
* To create a hibernation image it is necessary to make a copy of every page
* frame in use. We also need a number of page frames to be free during
* hibernation for allocations made while saving the image and for device
* drivers, in case they need to allocate memory from their hibernation
* callbacks (these two numbers are given by PAGES_FOR_IO, which is a rough
* estimate, and reserved_size divided by PAGE_SIZE, which is tunable through
* /sys/power/reserved_size, respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
* ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
* + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
*
* of them, which corresponds to the maximum size of a hibernation image.
*
* If image_size is set below the number following from the above formula,
* the preallocation of memory is continued until the total number of saveable
* pages in the system is below the requested image size or the minimum
* acceptable image size returned by minimum_image_size(), whichever is greater.
*/
int hibernate_preallocate_memory(void)
{
struct zone *zone;
unsigned long saveable, size, max_size, count, highmem, pages = 0;
unsigned long alloc, save_highmem, pages_highmem, avail_normal;
ktime_t start, stop;
int error;
printk(KERN_INFO "PM: Preallocating image memory... ");
start = ktime_get();
error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
if (error)
goto err_out;
alloc_normal = 0;
alloc_highmem = 0;
/* Count the number of saveable data pages. */
save_highmem = count_highmem_pages();
saveable = count_data_pages();
/*
* Compute the total number of page frames we can use (count) and the
* number of pages needed for image metadata (size).
*/
count = saveable;
saveable += save_highmem;
highmem = save_highmem;
size = 0;
for_each_populated_zone(zone) {
size += snapshot_additional_pages(zone);
if (is_highmem(zone))
highmem += zone_page_state(zone, NR_FREE_PAGES);
else
count += zone_page_state(zone, NR_FREE_PAGES);
}
avail_normal = count;
count += highmem;
count -= totalreserve_pages;
/* Add number of pages required for page keys (s390 only). */
size += page_key_additional_pages(saveable);
/* Compute the maximum number of saveable pages to leave in memory. */
max_size = (count - (size + PAGES_FOR_IO)) / 2
- 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
/* Compute the desired number of image pages specified by image_size. */
size = DIV_ROUND_UP(image_size, PAGE_SIZE);
if (size > max_size)
size = max_size;
/*
* If the desired number of image pages is at least as large as the
* current number of saveable pages in memory, allocate page frames for
* the image and we're done.
*/
if (size >= saveable) {
pages = preallocate_image_highmem(save_highmem);
pages += preallocate_image_memory(saveable - pages, avail_normal);
goto out;
}
/* Estimate the minimum size of the image. */
pages = minimum_image_size(saveable);
/*
* To avoid excessive pressure on the normal zone, leave room in it to
* accommodate an image of the minimum size (unless it's already too
* small, in which case don't preallocate pages from it at all).
*/
if (avail_normal > pages)
avail_normal -= pages;
else
avail_normal = 0;
if (size < pages)
size = min_t(unsigned long, pages, max_size);
/*
* Let the memory management subsystem know that we're going to need a
* large number of page frames to allocate and make it free some memory.
* NOTE: If this is not done, performance will be hurt badly in some
* test cases.
*/
shrink_all_memory(saveable - size);
/*
* The number of saveable pages in memory was too high, so apply some
* pressure to decrease it. First, make room for the largest possible
* image and fail if that doesn't work. Next, try to decrease the size
* of the image as much as indicated by 'size' using allocations from
* highmem and non-highmem zones separately.
*/
pages_highmem = preallocate_image_highmem(highmem / 2);
alloc = count - max_size;
if (alloc > pages_highmem)
alloc -= pages_highmem;
else
alloc = 0;
pages = preallocate_image_memory(alloc, avail_normal);
if (pages < alloc) {
/* We have exhausted non-highmem pages, try highmem. */
alloc -= pages;
pages += pages_highmem;
pages_highmem = preallocate_image_highmem(alloc);
if (pages_highmem < alloc)
goto err_out;
pages += pages_highmem;
/*
* size is the desired number of saveable pages to leave in
* memory, so try to preallocate (all memory - size) pages.
*/
alloc = (count - pages) - size;
pages += preallocate_image_highmem(alloc);
} else {
/*
* There are approximately max_size saveable pages at this point
* and we want to reduce this number down to size.
*/
alloc = max_size - size;
size = preallocate_highmem_fraction(alloc, highmem, count);
pages_highmem += size;
alloc -= size;
size = preallocate_image_memory(alloc, avail_normal);
pages_highmem += preallocate_image_highmem(alloc - size);
pages += pages_highmem + size;
}
/*
* We only need as many page frames for the image as there are saveable
* pages in memory, but we have allocated more. Release the excessive
* ones now.
*/
pages -= free_unnecessary_pages();
out:
stop = ktime_get();
printk(KERN_CONT "done (allocated %lu pages)\n", pages);
swsusp_show_speed(start, stop, pages, "Allocated");
return 0;
err_out:
printk(KERN_CONT "\n");
swsusp_free();
return -ENOMEM;
}
#ifdef CONFIG_HIGHMEM
/**
* count_pages_for_highmem - compute the number of non-highmem pages
* that will be necessary for creating copies of highmem pages.
*/
static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
{
unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
if (free_highmem >= nr_highmem)
nr_highmem = 0;
else
nr_highmem -= free_highmem;
return nr_highmem;
}
#else
static unsigned int
count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
* enough_free_mem - Make sure we have enough free memory for the
* snapshot image.
*/
static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
{
struct zone *zone;
unsigned int free = alloc_normal;
for_each_populated_zone(zone)
if (!is_highmem(zone))
free += zone_page_state(zone, NR_FREE_PAGES);
nr_pages += count_pages_for_highmem(nr_highmem);
pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
nr_pages, PAGES_FOR_IO, free);
return free > nr_pages + PAGES_FOR_IO;
}
#ifdef CONFIG_HIGHMEM
/**
* get_highmem_buffer - if there are some highmem pages in the suspend
* image, we may need the buffer to copy them and/or load their data.
*/
static inline int get_highmem_buffer(int safe_needed)
{
buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
return buffer ? 0 : -ENOMEM;
}
/**
* alloc_highmem_pages - allocate some highmem pages for the image.
* Try to allocate as many pages as needed, but if the number of free
* highmem pages is less than that, allocate them all.
*/
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int nr_highmem)
{
unsigned int to_alloc = count_free_highmem_pages();
if (to_alloc > nr_highmem)
to_alloc = nr_highmem;
nr_highmem -= to_alloc;
while (to_alloc-- > 0) {
struct page *page;
page = alloc_image_page(__GFP_HIGHMEM);
memory_bm_set_bit(bm, page_to_pfn(page));
}
return nr_highmem;
}
#else
static inline int get_highmem_buffer(int safe_needed) { return 0; }
static inline unsigned int
alloc_highmem_pages(struct memory_bitmap *bm, unsigned int n) { return 0; }
#endif /* CONFIG_HIGHMEM */
/**
* swsusp_alloc - allocate memory for the suspend image
*
* We first try to allocate as many highmem pages as there are
* saveable highmem pages in the system. If that fails, we allocate
* non-highmem pages for the copies of the remaining highmem ones.
*
* In this approach it is likely that the copies of highmem pages will
* also be located in the high memory, because of the way in which
* copy_data_pages() works.
*/
static int
swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
unsigned int nr_pages, unsigned int nr_highmem)
{
if (nr_highmem > 0) {
if (get_highmem_buffer(PG_ANY))
goto err_out;
if (nr_highmem > alloc_highmem) {
nr_highmem -= alloc_highmem;
nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
}
}
if (nr_pages > alloc_normal) {
nr_pages -= alloc_normal;
while (nr_pages-- > 0) {
struct page *page;
page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
if (!page)
goto err_out;
memory_bm_set_bit(copy_bm, page_to_pfn(page));
}
}
return 0;
err_out:
swsusp_free();
return -ENOMEM;
}
asmlinkage __visible int swsusp_save(void)
{
unsigned int nr_pages, nr_highmem;
printk(KERN_INFO "PM: Creating hibernation image:\n");
drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
if (!enough_free_mem(nr_pages, nr_highmem)) {
printk(KERN_ERR "PM: Not enough free memory\n");
return -ENOMEM;
}
if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
printk(KERN_ERR "PM: Memory allocation failed\n");
return -ENOMEM;
}
/* While allocating the suspend pagedir, new cold pages may appear.
* Kill them.
*/
drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
* End of critical section. From now on, we can write to memory,
* but we should not touch disk. This especially means we must _not_
* touch swap space! Except we must write out our image of course.
*/
nr_pages += nr_highmem;
nr_copy_pages = nr_pages;
nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
printk(KERN_INFO "PM: Hibernation image created (%d pages copied)\n",
nr_pages);
return 0;
}
#ifndef CONFIG_ARCH_HIBERNATION_HEADER
static int init_header_complete(struct swsusp_info *info)
{
memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
info->version_code = LINUX_VERSION_CODE;
return 0;
}
static char *check_image_kernel(struct swsusp_info *info)
{
if (info->version_code != LINUX_VERSION_CODE)
return "kernel version";
if (strcmp(info->uts.sysname,init_utsname()->sysname))
return "system type";
if (strcmp(info->uts.release,init_utsname()->release))
return "kernel release";
if (strcmp(info->uts.version,init_utsname()->version))
return "version";
if (strcmp(info->uts.machine,init_utsname()->machine))
return "machine";
return NULL;
}
#endif /* !CONFIG_ARCH_HIBERNATION_HEADER */
unsigned long snapshot_get_image_size(void)
{
return nr_copy_pages + nr_meta_pages + 1;
}
static int init_header(struct swsusp_info *info)
{
memset(info, 0, sizeof(struct swsusp_info));
info->num_physpages = get_num_physpages();
info->image_pages = nr_copy_pages;
info->pages = snapshot_get_image_size();
info->size = info->pages;
info->size <<= PAGE_SHIFT;
return init_header_complete(info);
}
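/*
* Layout arithmetic (illustrative, assuming a 64-bit kernel with 4 KiB
* pages, so sizeof(long) == 8): one meta page holds
* PAGE_SIZE / sizeof(long) = 512 pfns. For nr_copy_pages = 100000,
* nr_meta_pages = DIV_ROUND_UP(100000 * 8, 4096) = 196, and
* snapshot_get_image_size() = 1 (header) + 196 + 100000 = 100197 pages,
* i.e. info->size is 100197 << PAGE_SHIFT, roughly 391 MiB.
*/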
/**
* pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
* are stored in the array @buf[] (1 page at a time)
*/
static inline void
pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
buf[j] = memory_bm_next_pfn(bm);
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
/* Save page key for data page (s390 only). */
page_key_read(buf + j);
}
}
/**
* snapshot_read_next - used for reading the system memory snapshot.
*
* On the first call to it @handle should point to a zeroed
* snapshot_handle structure. The structure gets updated and the same
* pointer should be passed to this function on every subsequent call.
*
* On success the function returns a positive number. Then, the caller
* is allowed to read up to the returned number of bytes from the memory
* location computed by the data_of() macro.
*
* The function returns 0 to indicate the end of data stream condition,
* and a negative number is returned on error. In such cases the
* structure pointed to by @handle is not updated and should not be used
* any more.
*/
int snapshot_read_next(struct snapshot_handle *handle)
{
if (handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
if (!buffer) {
/* This makes the buffer be freed by swsusp_free() */
buffer = get_image_page(GFP_ATOMIC, PG_ANY);
if (!buffer)
return -ENOMEM;
}
if (!handle->cur) {
int error;
error = init_header((struct swsusp_info *)buffer);
if (error)
return error;
handle->buffer = buffer;
memory_bm_position_reset(&orig_bm);
memory_bm_position_reset(&copy_bm);
} else if (handle->cur <= nr_meta_pages) {
clear_page(buffer);
pack_pfns(buffer, &orig_bm);
} else {
struct page *page;
page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
if (PageHighMem(page)) {
/* Highmem pages are copied to the buffer,
* because we can't return with a kmapped
* highmem page (we may not be called again).
*/
void *kaddr;
kaddr = kmap_atomic(page);
copy_page(buffer, kaddr);
kunmap_atomic(kaddr);
handle->buffer = buffer;
} else {
handle->buffer = page_address(page);
}
}
handle->cur++;
return PAGE_SIZE;
}
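/*
* Sketch of the read-side protocol (this is essentially the loop that
* save_image() in swap.c runs; error handling elided):
*
*	struct snapshot_handle handle;
*	int n;
*
*	memset(&handle, 0, sizeof(handle));
*	while ((n = snapshot_read_next(&handle)) > 0) {
*		// write n bytes starting at data_of(handle) to storage
*	}
*	// n == 0 means end of the image; n < 0 is an error
*/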
/**
* mark_unsafe_pages - mark the pages that cannot be used for storing
* the image during resume, because they conflict with the pages that
* had been used before suspend
*/
static int mark_unsafe_pages(struct memory_bitmap *bm)
{
struct zone *zone;
unsigned long pfn, max_zone_pfn;
/* Clear page flags */
for_each_populated_zone(zone) {
max_zone_pfn = zone_end_pfn(zone);
for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
if (pfn_valid(pfn))
swsusp_unset_page_free(pfn_to_page(pfn));
}
/* Mark pages that correspond to the "original" pfns as "unsafe" */
memory_bm_position_reset(bm);
do {
pfn = memory_bm_next_pfn(bm);
if (likely(pfn != BM_END_OF_MAP)) {
if (likely(pfn_valid(pfn)))
swsusp_set_page_free(pfn_to_page(pfn));
else
return -EFAULT;
}
} while (pfn != BM_END_OF_MAP);
allocated_unsafe_pages = 0;
return 0;
}
static void
duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
{
unsigned long pfn;
memory_bm_position_reset(src);
pfn = memory_bm_next_pfn(src);
while (pfn != BM_END_OF_MAP) {
memory_bm_set_bit(dst, pfn);
pfn = memory_bm_next_pfn(src);
}
}
static int check_header(struct swsusp_info *info)
{
char *reason;
reason = check_image_kernel(info);
if (!reason && info->num_physpages != get_num_physpages())
reason = "memory size";
if (reason) {
printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
return -EPERM;
}
return 0;
}
/**
* load_header - check the image header and copy data from it
*/
static int
load_header(struct swsusp_info *info)
{
int error;
restore_pblist = NULL;
error = check_header(info);
if (!error) {
nr_copy_pages = info->image_pages;
nr_meta_pages = info->pages - info->image_pages - 1;
}
return error;
}
/**
* unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
* the corresponding bit in the memory bitmap @bm
*/
static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
{
int j;
for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
if (unlikely(buf[j] == BM_END_OF_MAP))
break;
/* Extract and buffer page key for data page (s390 only). */
page_key_memorize(buf + j);
if (memory_bm_pfn_present(bm, buf[j]))
memory_bm_set_bit(bm, buf[j]);
else
return -EFAULT;
}
return 0;
}
/* List of "safe" pages that may be used to store data loaded from the suspend
* image
*/
static struct linked_page *safe_pages_list;
#ifdef CONFIG_HIGHMEM
/* struct highmem_pbe is used for creating the list of highmem pages that
* should be restored atomically during the resume from disk, because the page
* frames they have occupied before the suspend are in use.
*/
struct highmem_pbe {
struct page *copy_page; /* data is here now */
struct page *orig_page; /* data was here before the suspend */
struct highmem_pbe *next;
};
/* List of highmem PBEs needed for restoring the highmem pages that were
* allocated before the suspend and included in the suspend image, but have
* also been allocated by the "resume" kernel, so their contents cannot be
* written directly to their "original" page frames.
*/
static struct highmem_pbe *highmem_pblist;
/**
* count_highmem_image_pages - compute the number of highmem pages in the
* suspend image. The bits in the memory bitmap @bm that correspond to the
* image pages are assumed to be set.
*/
static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
{
unsigned long pfn;
unsigned int cnt = 0;
memory_bm_position_reset(bm);
pfn = memory_bm_next_pfn(bm);
while (pfn != BM_END_OF_MAP) {
if (PageHighMem(pfn_to_page(pfn)))
cnt++;
pfn = memory_bm_next_pfn(bm);
}
return cnt;
}
/**
* prepare_highmem_image - try to allocate as many highmem pages as
* there are highmem image pages (@nr_highmem_p points to the variable
* containing the number of highmem image pages). The pages that are
* "safe" (ie. will not be overwritten when the suspend image is
* restored) have the corresponding bits set in @bm (it must be
* uninitialized).
*
* NOTE: This function should not be called if there are no highmem
* image pages.
*/
static unsigned int safe_highmem_pages;
static struct memory_bitmap *safe_highmem_bm;
static int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
unsigned int to_alloc;
if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
return -ENOMEM;
if (get_highmem_buffer(PG_SAFE))
return -ENOMEM;
to_alloc = count_free_highmem_pages();
if (to_alloc > *nr_highmem_p)
to_alloc = *nr_highmem_p;
else
*nr_highmem_p = to_alloc;
safe_highmem_pages = 0;
while (to_alloc-- > 0) {
struct page *page;
page = alloc_page(__GFP_HIGHMEM);
if (!swsusp_page_is_free(page)) {
/* The page is "safe", set its bit the bitmap */
memory_bm_set_bit(bm, page_to_pfn(page));
safe_highmem_pages++;
}
/* Mark the page as allocated */
swsusp_set_page_forbidden(page);
swsusp_set_page_free(page);
}
memory_bm_position_reset(bm);
safe_highmem_bm = bm;
return 0;
}
/**
* get_highmem_page_buffer - for a given highmem image page find the buffer
* that snapshot_write_next() should set for its caller to write to.
*
* If the page is to be saved to its "original" page frame or a copy of
* the page is to be made in the highmem, @buffer is returned. Otherwise,
* the copy of the page is to be made in normal memory, so the address of
* the copy is returned.
*
* If @buffer is returned, the caller of snapshot_write_next() will write
* the page's contents to @buffer, so they will have to be copied to the
* right location on the next call to snapshot_write_next() and it is done
* with the help of copy_last_highmem_page(). For this purpose, if
* @buffer is returned, @last_highmem_page is set to the page to which
* the data will have to be copied from @buffer.
*/
static struct page *last_highmem_page;
static void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
struct highmem_pbe *pbe;
void *kaddr;
if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
/* We have allocated the "original" page frame and we can
* use it directly to store the loaded page.
*/
last_highmem_page = page;
return buffer;
}
/* The "original" page frame has not been allocated and we have to
* use a "safe" page frame to store the loaded page.
*/
pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
if (!pbe) {
swsusp_free();
return ERR_PTR(-ENOMEM);
}
pbe->orig_page = page;
if (safe_highmem_pages > 0) {
struct page *tmp;
/* Copy of the page will be stored in high memory */
kaddr = buffer;
tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
safe_highmem_pages--;
last_highmem_page = tmp;
pbe->copy_page = tmp;
} else {
/* Copy of the page will be stored in normal memory */
kaddr = safe_pages_list;
safe_pages_list = safe_pages_list->next;
pbe->copy_page = virt_to_page(kaddr);
}
pbe->next = highmem_pblist;
highmem_pblist = pbe;
return kaddr;
}
/**
* copy_last_highmem_page - copy the contents of a highmem image from
* @buffer, where the caller of snapshot_write_next() has placed them,
* to the right location represented by @last_highmem_page.
*/
static void copy_last_highmem_page(void)
{
if (last_highmem_page) {
void *dst;
dst = kmap_atomic(last_highmem_page);
copy_page(dst, buffer);
kunmap_atomic(dst);
last_highmem_page = NULL;
}
}
static inline int last_highmem_page_copied(void)
{
return !last_highmem_page;
}
static inline void free_highmem_data(void)
{
if (safe_highmem_bm)
memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
if (buffer)
free_image_page(buffer, PG_UNSAFE_CLEAR);
}
#else
static unsigned int
count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
static inline int
prepare_highmem_image(struct memory_bitmap *bm, unsigned int *nr_highmem_p)
{
return 0;
}
static inline void *
get_highmem_page_buffer(struct page *page, struct chain_allocator *ca)
{
return ERR_PTR(-EINVAL);
}
static inline void copy_last_highmem_page(void) {}
static inline int last_highmem_page_copied(void) { return 1; }
static inline void free_highmem_data(void) {}
#endif /* CONFIG_HIGHMEM */
/**
* prepare_image - use the memory bitmap @bm to mark the pages that will
* be overwritten in the process of restoring the system memory state
* from the suspend image ("unsafe" pages) and allocate memory for the
* image.
*
* The idea is to allocate a new memory bitmap first and then allocate
* as many pages as needed for the image data, but not to assign these
* pages to specific tasks initially. Instead, we just mark them as
* allocated and create a list of "safe" pages that will be used
* later. On systems with high memory a list of "safe" highmem pages is
* also created.
*/
#define PBES_PER_LINKED_PAGE (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
static int
prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
{
unsigned int nr_pages, nr_highmem;
struct linked_page *sp_list, *lp;
int error;
/* If there is no highmem, the buffer will not be necessary */
free_image_page(buffer, PG_UNSAFE_CLEAR);
buffer = NULL;
nr_highmem = count_highmem_image_pages(bm);
error = mark_unsafe_pages(bm);
if (error)
goto Free;
error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
if (error)
goto Free;
duplicate_memory_bitmap(new_bm, bm);
memory_bm_free(bm, PG_UNSAFE_KEEP);
if (nr_highmem > 0) {
error = prepare_highmem_image(bm, &nr_highmem);
if (error)
goto Free;
}
/* Reserve some safe pages for potential later use.
*
* NOTE: This way we make sure there will be enough safe pages for the
* chain_alloc() in get_buffer(). It is a bit wasteful, but
* nr_copy_pages cannot be greater than 50% of the memory anyway.
*/
sp_list = NULL;
/* nr_copy_pages cannot be less than allocated_unsafe_pages */
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
while (nr_pages > 0) {
lp = get_image_page(GFP_ATOMIC, PG_SAFE);
if (!lp) {
error = -ENOMEM;
goto Free;
}
lp->next = sp_list;
sp_list = lp;
nr_pages--;
}
/* Preallocate memory for the image */
safe_pages_list = NULL;
nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
while (nr_pages > 0) {
lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
if (!lp) {
error = -ENOMEM;
goto Free;
}
if (!swsusp_page_is_free(virt_to_page(lp))) {
/* The page is "safe", add it to the list */
lp->next = safe_pages_list;
safe_pages_list = lp;
}
/* Mark the page as allocated */
swsusp_set_page_forbidden(virt_to_page(lp));
swsusp_set_page_free(virt_to_page(lp));
nr_pages--;
}
/* Free the reserved safe pages so that chain_alloc() can use them */
while (sp_list) {
lp = sp_list->next;
free_image_page(sp_list, PG_UNSAFE_CLEAR);
sp_list = lp;
}
return 0;
Free:
swsusp_free();
return error;
}
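/*
* To make the "safe"/"unsafe" distinction concrete (an illustrative
* walk-through, not additional logic): suppose page frame 1000 holds
* image data. If get_zeroed_page() above happens to return frame 1000,
* the swsusp_page_is_free() test sees the mark set by mark_unsafe_pages()
* and the frame is kept off safe_pages_list; get_buffer() will later
* recognize it (forbidden + free) as the "original" frame and load the
* image page straight into it. Any other frame goes onto safe_pages_list
* and is handed out via a PBE, to be copied into place during the final
* atomic restore.
*/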
/**
* get_buffer - compute the address that snapshot_write_next() should
* set for its caller to write to.
*/
static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
{
struct pbe *pbe;
struct page *page;
unsigned long pfn = memory_bm_next_pfn(bm);
if (pfn == BM_END_OF_MAP)
return ERR_PTR(-EFAULT);
page = pfn_to_page(pfn);
if (PageHighMem(page))
return get_highmem_page_buffer(page, ca);
if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
/* We have allocated the "original" page frame and we can
* use it directly to store the loaded page.
*/
return page_address(page);
/* The "original" page frame has not been allocated and we have to
* use a "safe" page frame to store the loaded page.
*/
pbe = chain_alloc(ca, sizeof(struct pbe));
if (!pbe) {
swsusp_free();
return ERR_PTR(-ENOMEM);
}
pbe->orig_address = page_address(page);
pbe->address = safe_pages_list;
safe_pages_list = safe_pages_list->next;
pbe->next = restore_pblist;
restore_pblist = pbe;
return pbe->address;
}
/**
* snapshot_write_next - used for writing the system memory snapshot.
*
* On the first call to it @handle should point to a zeroed
* snapshot_handle structure. The structure gets updated and the same
* pointer should be passed to this function on every subsequent call.
*
* On success the function returns a positive number. Then, the caller
* is allowed to write up to the returned number of bytes to the memory
* location computed by the data_of() macro.
*
* The function returns 0 to indicate the "end of file" condition,
* and a negative number is returned on error. In such cases the
* structure pointed to by @handle is not updated and should not be used
* any more.
*/
int snapshot_write_next(struct snapshot_handle *handle)
{
static struct chain_allocator ca;
int error = 0;
/* Check if we have already loaded the entire image */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
return 0;
handle->sync_read = 1;
if (!handle->cur) {
if (!buffer)
/* This makes the buffer be freed by swsusp_free() */
buffer = get_image_page(GFP_ATOMIC, PG_ANY);
if (!buffer)
return -ENOMEM;
handle->buffer = buffer;
} else if (handle->cur == 1) {
error = load_header(buffer);
if (error)
return error;
error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
if (error)
return error;
/* Allocate buffer for page keys. */
error = page_key_alloc(nr_copy_pages);
if (error)
return error;
} else if (handle->cur <= nr_meta_pages + 1) {
error = unpack_orig_pfns(buffer, &copy_bm);
if (error)
return error;
if (handle->cur == nr_meta_pages + 1) {
error = prepare_image(&orig_bm, &copy_bm);
if (error)
return error;
chain_init(&ca, GFP_ATOMIC, PG_SAFE);
memory_bm_position_reset(&orig_bm);
restore_pblist = NULL;
handle->buffer = get_buffer(&orig_bm, &ca);
handle->sync_read = 0;
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
}
} else {
copy_last_highmem_page();
/* Restore page key for data page (s390 only). */
page_key_write(handle->buffer);
handle->buffer = get_buffer(&orig_bm, &ca);
if (IS_ERR(handle->buffer))
return PTR_ERR(handle->buffer);
if (handle->buffer != buffer)
handle->sync_read = 0;
}
handle->cur++;
return PAGE_SIZE;
}
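/*
* Sketch of the write-side protocol (roughly what the image-loading path
* in swap.c does on resume; error handling elided):
*
*	struct snapshot_handle handle;
*	int n;
*
*	memset(&handle, 0, sizeof(handle));
*	for (;;) {
*		n = snapshot_write_next(&handle);
*		if (n <= 0)
*			break;
*		// read n bytes from storage into data_of(handle)
*	}
*	snapshot_write_finalize(&handle);
*	if (n < 0 || !snapshot_image_loaded(&handle))
*		// the image is unusable
*/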
/**
* snapshot_write_finalize - must be called after the last call to
* snapshot_write_next() in case the last page in the image happens
* to be a highmem page and its contents should be stored in the
* highmem. Additionally, it releases the memory that will not be
* used any more.
*/
void snapshot_write_finalize(struct snapshot_handle *handle)
{
copy_last_highmem_page();
/* Restore page key for data page (s390 only). */
page_key_write(handle->buffer);
page_key_free();
/* Free only if we have loaded the image entirely */
if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
memory_bm_free(&orig_bm, PG_UNSAFE_CLEAR);
free_highmem_data();
}
}
int snapshot_image_loaded(struct snapshot_handle *handle)
{
return !(!nr_copy_pages || !last_highmem_page_copied() ||
handle->cur <= nr_meta_pages + nr_copy_pages);
}
#ifdef CONFIG_HIGHMEM
/* Assumes that @buf is ready and points to a "safe" page */
static inline void
swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
{
void *kaddr1, *kaddr2;
kaddr1 = kmap_atomic(p1);
kaddr2 = kmap_atomic(p2);
copy_page(buf, kaddr1);
copy_page(kaddr1, kaddr2);
copy_page(kaddr2, buf);
kunmap_atomic(kaddr2);
kunmap_atomic(kaddr1);
}
/**
* restore_highmem - for each highmem page that was allocated before
* the suspend and included in the suspend image, and also has been
* allocated by the "resume" kernel swap its current (ie. "before
* resume") contents with the previous (ie. "before suspend") one.
*
* If the resume eventually fails, we can call this function once
* again and restore the "before resume" highmem state.
*/
int restore_highmem(void)
{
struct highmem_pbe *pbe = highmem_pblist;
void *buf;
if (!pbe)
return 0;
buf = get_image_page(GFP_ATOMIC, PG_SAFE);
if (!buf)
return -ENOMEM;
while (pbe) {
swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
pbe = pbe->next;
}
free_image_page(buf, PG_UNSAFE_CLEAR);
return 0;
}
#endif /* CONFIG_HIGHMEM */
/*
* kernel/power/suspend.c - Suspend to RAM and standby functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
* Copyright (c) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
*
* This file is released under the GPLv2.
*/
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/syscalls.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/ftrace.h>
#include <trace/events/power.h>
#include <linux/compiler.h>
#include <linux/moduleparam.h>
#include "power.h"
const char *pm_labels[] = { "mem", "standby", "freeze", NULL };
const char *pm_states[PM_SUSPEND_MAX];
static const struct platform_suspend_ops *suspend_ops;
static const struct platform_freeze_ops *freeze_ops;
static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
enum freeze_state __read_mostly suspend_freeze_state;
static DEFINE_SPINLOCK(suspend_freeze_lock);
void freeze_set_ops(const struct platform_freeze_ops *ops)
{
lock_system_sleep();
freeze_ops = ops;
unlock_system_sleep();
}
static void freeze_begin(void)
{
suspend_freeze_state = FREEZE_STATE_NONE;
}
static void freeze_enter(void)
{
spin_lock_irq(&suspend_freeze_lock);
if (pm_wakeup_pending())
goto out;
suspend_freeze_state = FREEZE_STATE_ENTER;
spin_unlock_irq(&suspend_freeze_lock);
get_online_cpus();
cpuidle_resume();
/* Push all the CPUs into the idle loop. */
wake_up_all_idle_cpus();
pr_debug("PM: suspend-to-idle\n");
/* Make the current CPU wait so it can enter the idle loop too. */
wait_event(suspend_freeze_wait_head,
suspend_freeze_state == FREEZE_STATE_WAKE);
pr_debug("PM: resume from suspend-to-idle\n");
cpuidle_pause();
put_online_cpus();
spin_lock_irq(&suspend_freeze_lock);
out:
suspend_freeze_state = FREEZE_STATE_NONE;
spin_unlock_irq(&suspend_freeze_lock);
}
void freeze_wake(void)
{
unsigned long flags;
spin_lock_irqsave(&suspend_freeze_lock, flags);
if (suspend_freeze_state > FREEZE_STATE_NONE) {
suspend_freeze_state = FREEZE_STATE_WAKE;
wake_up(&suspend_freeze_wait_head);
}
spin_unlock_irqrestore(&suspend_freeze_lock, flags);
}
EXPORT_SYMBOL_GPL(freeze_wake);
static bool valid_state(suspend_state_t state)
{
/*
* PM_SUSPEND_STANDBY and PM_SUSPEND_MEM states need low level
* support and need to be valid to the low level
* implementation; no ->valid() callback implies that none are valid.
*/
return suspend_ops && suspend_ops->valid && suspend_ops->valid(state);
}
/*
* If this is set, the "mem" label always corresponds to the deepest sleep state
* available, the "standby" label corresponds to the second deepest sleep state
* available (if any), and the "freeze" label corresponds to the remaining
* available sleep state (if there is one).
*/
static bool relative_states;
static int __init sleep_states_setup(char *str)
{
relative_states = !strncmp(str, "1", 1);
pm_states[PM_SUSPEND_FREEZE] = pm_labels[relative_states ? 0 : 2];
return 1;
}
__setup("relative_sleep_states=", sleep_states_setup);
/**
* suspend_set_ops - Set the global suspend method table.
* @ops: Suspend operations to use.
*/
void suspend_set_ops(const struct platform_suspend_ops *ops)
{
suspend_state_t i;
int j = 0;
lock_system_sleep();
suspend_ops = ops;
for (i = PM_SUSPEND_MEM; i >= PM_SUSPEND_STANDBY; i--)
if (valid_state(i)) {
pm_states[i] = pm_labels[j++];
} else if (!relative_states) {
pm_states[i] = NULL;
j++;
}
pm_states[PM_SUSPEND_FREEZE] = pm_labels[j];
unlock_system_sleep();
}
EXPORT_SYMBOL_GPL(suspend_set_ops);
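/*
* Label assignment example (illustrative): on a platform whose ->valid()
* accepts only PM_SUSPEND_MEM, the loop above produces
*
*	relative_states == 0:  mem = "mem", standby = NULL, freeze = "freeze"
*	relative_states == 1:  mem = "mem", freeze = "standby"
*
* i.e. with relative sleep states the "standby" label slides down to the
* deepest remaining state, suspend-to-idle.
*/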
/**
* suspend_valid_only_mem - Generic memory-only valid callback.
*
* Platform drivers that implement mem suspend only and only need to check for
* that in their .valid() callback can use this instead of rolling their own
* .valid() callback.
*/
int suspend_valid_only_mem(suspend_state_t state)
{
return state == PM_SUSPEND_MEM;
}
EXPORT_SYMBOL_GPL(suspend_valid_only_mem);
static bool sleep_state_supported(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter);
}
static int platform_suspend_prepare(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ?
suspend_ops->prepare() : 0;
}
static int platform_suspend_prepare_late(suspend_state_t state)
{
return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
freeze_ops->prepare() : 0;
}
static int platform_suspend_prepare_noirq(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
suspend_ops->prepare_late() : 0;
}
static void platform_resume_noirq(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
suspend_ops->wake();
}
static void platform_resume_early(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
freeze_ops->restore();
}
static void platform_resume_finish(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
suspend_ops->finish();
}
static int platform_suspend_begin(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin)
return freeze_ops->begin();
else if (suspend_ops->begin)
return suspend_ops->begin(state);
else
return 0;
}
static void platform_resume_end(suspend_state_t state)
{
if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
freeze_ops->end();
else if (suspend_ops->end)
suspend_ops->end();
}
static void platform_recover(suspend_state_t state)
{
if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
suspend_ops->recover();
}
static bool platform_suspend_again(suspend_state_t state)
{
return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ?
suspend_ops->suspend_again() : false;
}
#ifdef CONFIG_PM_DEBUG
static unsigned int pm_test_delay = 5;
module_param(pm_test_delay, uint, 0644);
MODULE_PARM_DESC(pm_test_delay,
"Number of seconds to wait before resuming from suspend test");
#endif
static int suspend_test(int level)
{
#ifdef CONFIG_PM_DEBUG
if (pm_test_level == level) {
printk(KERN_INFO "suspend debug: Waiting for %d second(s).\n",
pm_test_delay);
mdelay(pm_test_delay * 1000);
return 1;
}
#endif /* CONFIG_PM_DEBUG */
return 0;
}
/**
* suspend_prepare - Prepare for entering system sleep state.
*
* Common code run for every system sleep state that can be entered (except for
* hibernation). Run suspend notifiers, allocate the "suspend" console and
* freeze processes.
*/
static int suspend_prepare(suspend_state_t state)
{
int error;
if (!sleep_state_supported(state))
return -EPERM;
pm_prepare_console();
error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
if (error)
goto Finish;
trace_suspend_resume(TPS("freeze_processes"), 0, true);
error = suspend_freeze_processes();
trace_suspend_resume(TPS("freeze_processes"), 0, false);
if (!error)
return 0;
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
return error;
}
/* default implementation */
void __weak arch_suspend_disable_irqs(void)
{
local_irq_disable();
}
/* default implementation */
void __weak arch_suspend_enable_irqs(void)
{
local_irq_enable();
}
/**
* suspend_enter - Make the system enter the given sleep state.
* @state: System sleep state to enter.
* @wakeup: Returns information that the sleep state should not be re-entered.
*
* This function should be called after devices have been suspended.
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
int error;
error = platform_suspend_prepare(state);
if (error)
goto Platform_finish;
error = dpm_suspend_late(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: late suspend of devices failed\n");
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
if (error)
goto Devices_early_resume;
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
printk(KERN_ERR "PM: noirq suspend of devices failed\n");
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
if (error)
goto Platform_wake;
if (suspend_test(TEST_PLATFORM))
goto Platform_wake;
/*
* PM_SUSPEND_FREEZE equals
* frozen processes + suspended devices + idle processors.
* Thus we should invoke freeze_enter() soon after
* all the devices are suspended.
*/
if (state == PM_SUSPEND_FREEZE) {
trace_suspend_resume(TPS("machine_suspend"), state, true);
freeze_enter();
trace_suspend_resume(TPS("machine_suspend"), state, false);
goto Platform_wake;
}
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
if (!(suspend_test(TEST_CORE) || *wakeup)) {
trace_suspend_resume(TPS("machine_suspend"),
state, true);
error = suspend_ops->enter(state);
trace_suspend_resume(TPS("machine_suspend"),
state, false);
events_check_enabled = false;
} else if (*wakeup) {
error = -EBUSY;
}
syscore_resume();
}
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
Enable_cpus:
enable_nonboot_cpus();
Platform_wake:
platform_resume_noirq(state);
dpm_resume_noirq(PMSG_RESUME);
Platform_early_resume:
platform_resume_early(state);
Devices_early_resume:
dpm_resume_early(PMSG_RESUME);
Platform_finish:
platform_resume_finish(state);
return error;
}
/**
* suspend_devices_and_enter - Suspend devices and enter system sleep state.
* @state: System sleep state to enter.
*/
int suspend_devices_and_enter(suspend_state_t state)
{
int error;
bool wakeup = false;
if (!sleep_state_supported(state))
return -ENOSYS;
error = platform_suspend_begin(state);
if (error)
goto Close;
suspend_console();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
if (suspend_test(TEST_DEVICES))
goto Recover_platform;
do {
error = suspend_enter(state, &wakeup);
} while (!error && !wakeup && platform_suspend_again(state));
Resume_devices:
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
trace_suspend_resume(TPS("resume_console"), state, true);
resume_console();
trace_suspend_resume(TPS("resume_console"), state, false);
Close:
platform_resume_end(state);
return error;
Recover_platform:
platform_recover(state);
goto Resume_devices;
}
/**
* suspend_finish - Clean up before finishing the suspend sequence.
*
* Call platform code to clean up, restart processes, and free the console that
* we've allocated. This routine is not called for hibernation.
*/
static void suspend_finish(void)
{
suspend_thaw_processes();
pm_notifier_call_chain(PM_POST_SUSPEND);
pm_restore_console();
}
/**
* enter_state - Do common work needed to enter system sleep state.
* @state: System sleep state to enter.
*
* Make sure that no one else is trying to put the system into a sleep state.
* Fail if that's not the case. Otherwise, prepare for system suspend, make the
* system enter the given sleep state and clean up after wakeup.
*/
static int enter_state(suspend_state_t state)
{
int error;
trace_suspend_resume(TPS("suspend_enter"), state, true);
if (state == PM_SUSPEND_FREEZE) {
#ifdef CONFIG_PM_DEBUG
if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) {
pr_warning("PM: Unsupported test mode for suspend to idle,"
"please choose none/freezer/devices/platform.\n");
return -EAGAIN;
}
#endif
} else if (!valid_state(state)) {
return -EINVAL;
}
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
if (state == PM_SUSPEND_FREEZE)
freeze_begin();
trace_suspend_resume(TPS("sync_filesystems"), 0, true);
printk(KERN_INFO "PM: Syncing filesystems ... ");
sys_sync();
printk("done.\n");
trace_suspend_resume(TPS("sync_filesystems"), 0, false);
pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]);
error = suspend_prepare(state);
if (error)
goto Unlock;
if (suspend_test(TEST_FREEZER))
goto Finish;
trace_suspend_resume(TPS("suspend_enter"), state, false);
pr_debug("PM: Suspending system (%s)\n", pm_states[state]);
pm_restrict_gfp_mask();
error = suspend_devices_and_enter(state);
pm_restore_gfp_mask();
Finish:
pr_debug("PM: Finishing wakeup.\n");
suspend_finish();
Unlock:
mutex_unlock(&pm_mutex);
return error;
}
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
*
* Check if the value of @state represents one of the supported states,
* execute enter_state() and update system suspend statistics.
*/
int pm_suspend(suspend_state_t state)
{
int error;
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
error = enter_state(state);
if (error) {
suspend_stats.fail++;
dpm_save_failed_errno(error);
} else {
suspend_stats.success++;
}
return error;
}
EXPORT_SYMBOL(pm_suspend);
/*
* kernel/power/suspend_test.c - Suspend to RAM and standby test facility.
*
* Copyright (c) 2009 Pavel Machek <pavel@ucw.cz>
*
* This file is released under the GPLv2.
*/
#include <linux/init.h>
#include <linux/rtc.h>
#include "power.h"
/*
* We test the system suspend code by setting an RTC wakealarm a short
* time in the future, then suspending. Suspending the devices won't
* normally take long ... some systems only need a few milliseconds.
*
* The time it takes is system-specific though, so when we test this
* during system bootup we allow a LOT of time.
*/
#define TEST_SUSPEND_SECONDS 10
static unsigned long suspend_test_start_time;
static u32 test_repeat_count_max = 1;
static u32 test_repeat_count_current;
void suspend_test_start(void)
{
/* FIXME Use better timebase than "jiffies", ideally a clocksource.
* What we want is a hardware counter that will work correctly even
* during the irqs-are-off stages of the suspend/resume cycle...
*/
suspend_test_start_time = jiffies;
}
void suspend_test_finish(const char *label)
{
long nj = jiffies - suspend_test_start_time;
unsigned msec;
msec = jiffies_to_msecs(abs(nj));
pr_info("PM: %s took %d.%03d seconds\n", label,
msec / 1000, msec % 1000);
/* Warning on suspend means the RTC alarm period needs to be
* larger -- the system was sooo slooowwww to suspend that the
* alarm (should have) fired before the system went to sleep!
*
* Warning on either suspend or resume also means the system
* has some performance issues. The stack dump of a WARN_ON
* is more likely to get the right attention than a printk...
*/
WARN(msec > (TEST_SUSPEND_SECONDS * 1000),
"Component: %s, time: %u\n", label, msec);
}
/*
* To test system suspend, we need a hands-off mechanism to resume the
* system. RTCs wake alarms are a common self-contained mechanism.
*/
static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
{
static char err_readtime[] __initdata =
KERN_ERR "PM: can't read %s time, err %d\n";
static char err_wakealarm[] __initdata =
KERN_ERR "PM: can't set %s wakealarm, err %d\n";
static char err_suspend[] __initdata =
KERN_ERR "PM: suspend test failed, error %d\n";
static char info_test[] __initdata =
KERN_INFO "PM: test RTC wakeup from '%s' suspend\n";
unsigned long now;
struct rtc_wkalrm alm;
int status;
/* this may fail if the RTC hasn't been initialized */
repeat:
status = rtc_read_time(rtc, &alm.time);
if (status < 0) {
printk(err_readtime, dev_name(&rtc->dev), status);
return;
}
rtc_tm_to_time(&alm.time, &now);
memset(&alm, 0, sizeof alm);
rtc_time_to_tm(now + TEST_SUSPEND_SECONDS, &alm.time);
alm.enabled = true;
status = rtc_set_alarm(rtc, &alm);
if (status < 0) {
printk(err_wakealarm, dev_name(&rtc->dev), status);
return;
}
if (state == PM_SUSPEND_MEM) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status == -ENODEV)
state = PM_SUSPEND_STANDBY;
}
if (state == PM_SUSPEND_STANDBY) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
if (status < 0)
state = PM_SUSPEND_FREEZE;
}
if (state == PM_SUSPEND_FREEZE) {
printk(info_test, pm_states[state]);
status = pm_suspend(state);
}
if (status < 0)
printk(err_suspend, status);
test_repeat_count_current++;
if (test_repeat_count_current < test_repeat_count_max)
goto repeat;
/* Some platforms can't detect that the alarm triggered the
* wakeup, or (accordingly) disable it afterwards.
* It's supposed to give oneshot behavior; cope.
*/
alm.enabled = false;
rtc_set_alarm(rtc, &alm);
}
static int __init has_wakealarm(struct device *dev, const void *data)
{
struct rtc_device *candidate = to_rtc_device(dev);
if (!candidate->ops->set_alarm)
return 0;
if (!device_may_wakeup(candidate->dev.parent))
return 0;
return 1;
}
/*
* Kernel options like "test_suspend=mem" force suspend/resume sanity tests
* at startup time. They're normally disabled, for faster boot and because
* we can't know which states really work on this particular system.
*/
static const char *test_state_label __initdata;
static char warn_bad_state[] __initdata =
KERN_WARNING "PM: can't test '%s' suspend state\n";
static int __init setup_test_suspend(char *value)
{
int i;
char *repeat;
char *suspend_type;
/* example : "=mem[,N]" ==> "mem[,N]" */
value++;
suspend_type = strsep(&value, ",");
if (!suspend_type)
return 0;
repeat = strsep(&value, ",");
if (repeat) {
if (kstrtou32(repeat, 0, &test_repeat_count_max))
return 0;
}
for (i = 0; pm_labels[i]; i++)
if (!strcmp(pm_labels[i], suspend_type)) {
test_state_label = pm_labels[i];
return 0;
}
printk(warn_bad_state, suspend_type);
return 0;
}
__setup("test_suspend", setup_test_suspend);
static int __init test_suspend(void)
{
static char warn_no_rtc[] __initdata =
KERN_WARNING "PM: no wakealarm-capable RTC driver is ready\n";
struct rtc_device *rtc = NULL;
struct device *dev;
suspend_state_t test_state;
/* PM is initialized by now; is that state testable? */
if (!test_state_label)
return 0;
for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) {
const char *state_label = pm_states[test_state];
if (state_label && !strcmp(test_state_label, state_label))
break;
}
if (test_state == PM_SUSPEND_MAX) {
printk(warn_bad_state, test_state_label);
return 0;
}
/* RTCs have initialized by now too ... can we use one? */
dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
if (dev)
rtc = rtc_class_open(dev_name(dev));
if (!rtc) {
printk(warn_no_rtc);
return 0;
}
/* go for it */
test_wakealarm(rtc, test_state);
rtc_class_close(rtc);
return 0;
}
late_initcall(test_suspend);
/*
* linux/kernel/power/swap.c
*
* This file provides functions for reading the suspend image from
* and writing it to a swap partition.
*
* Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
* Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com>
*
* This file is released under the GPLv2.
*
*/
#include <linux/module.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/bitops.h>
#include <linux/genhd.h>
#include <linux/device.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/crc32.h>
#include <linux/ktime.h>
#include "power.h"
#define HIBERNATE_SIG "S1SUSPEND"
/*
* The swap map is a data structure used for keeping track of each page
* written to a swap partition. It consists of many swap_map_page
* structures, each containing an array of MAP_PAGE_ENTRIES swap entries.
* These structures are stored on the swap and linked together with the
* help of the .next_swap member.
*
* The swap map is created during suspend. The swap map pages are
* allocated and populated one at a time, so we only need one memory
* page to set up the entire structure.
*
* During resume, all the swap_map_page structures are read back into a list.
*/
#define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1)
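/*
* Capacity arithmetic (illustrative, assuming 4 KiB pages and an 8-byte
* sector_t): MAP_PAGE_ENTRIES = 4096 / 8 - 1 = 511 entries per map page,
* so a 1 GiB image (262144 data pages) needs
* DIV_ROUND_UP(262144, 511) = 514 swap map pages -- about 2 MiB of
* on-swap metadata.
*/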
/*
* Number of free pages that are not high.
*/
static inline unsigned long low_free_pages(void)
{
return nr_free_pages() - nr_free_highpages();
}
/*
* Number of pages required to be kept free while writing the image. Always
* half of all available low pages before the writing starts.
*/
static inline unsigned long reqd_free_pages(void)
{
return low_free_pages() / 2;
}
struct swap_map_page {
sector_t entries[MAP_PAGE_ENTRIES];
sector_t next_swap;
};
struct swap_map_page_list {
struct swap_map_page *map;
struct swap_map_page_list *next;
};
/**
* The swap_map_handle structure is used for handling swap in
* a file-like way.
*/
struct swap_map_handle {
struct swap_map_page *cur;
struct swap_map_page_list *maps;
sector_t cur_swap;
sector_t first_sector;
unsigned int k;
unsigned long reqd_free_pages;
u32 crc32;
};
struct swsusp_header {
char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) -
sizeof(u32)];
u32 crc32;
sector_t image;
unsigned int flags; /* Flags to pass to the "boot" kernel */
char orig_sig[10];
char sig[10];
} __packed;
static struct swsusp_header *swsusp_header;
/**
* The following functions are used for tracing the allocated
* swap pages, so that they can be freed in case of an error.
*/
struct swsusp_extent {
struct rb_node node;
unsigned long start;
unsigned long end;
};
static struct rb_root swsusp_extents = RB_ROOT;
static int swsusp_extents_insert(unsigned long swap_offset)
{
struct rb_node **new = &(swsusp_extents.rb_node);
struct rb_node *parent = NULL;
struct swsusp_extent *ext;
/* Figure out where to put the new node */
while (*new) {
ext = rb_entry(*new, struct swsusp_extent, node);
parent = *new;
if (swap_offset < ext->start) {
/* Try to merge */
if (swap_offset == ext->start - 1) {
ext->start--;
return 0;
}
new = &((*new)->rb_left);
} else if (swap_offset > ext->end) {
/* Try to merge */
if (swap_offset == ext->end + 1) {
ext->end++;
return 0;
}
new = &((*new)->rb_right);
} else {
/* It already is in the tree */
return -EINVAL;
}
}
/* Add the new node and rebalance the tree. */
ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
if (!ext)
return -ENOMEM;
ext->start = swap_offset;
ext->end = swap_offset;
rb_link_node(&ext->node, parent, new);
rb_insert_color(&ext->node, &swsusp_extents);
return 0;
}
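/*
* Merge behaviour example (illustrative): inserting offsets 700, 701 and
* 702 in that order leaves a single extent [700, 702] -- the second and
* third calls hit the "swap_offset == ext->end + 1" case and just bump
* ->end. Inserting 701 again would fall into the final else branch and
* return -EINVAL, since the offset is already covered by the tree.
*/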
/**
* alloc_swapdev_block - allocate a swap page and register that it has
* been allocated, so that it can be freed in case of an error.
*/
sector_t alloc_swapdev_block(int swap)
{
unsigned long offset;
offset = swp_offset(get_swap_page_of_type(swap));
if (offset) {
if (swsusp_extents_insert(offset))
swap_free(swp_entry(swap, offset));
else
return swapdev_block(swap, offset);
}
return 0;
}
/**
* free_all_swap_pages - free swap pages allocated for saving image data.
* It also frees the extents used to register which swap entries had been
* allocated.
*/
void free_all_swap_pages(int swap)
{
struct rb_node *node;
while ((node = swsusp_extents.rb_node)) {
struct swsusp_extent *ext;
unsigned long offset;
ext = container_of(node, struct swsusp_extent, node);
rb_erase(node, &swsusp_extents);
for (offset = ext->start; offset <= ext->end; offset++)
swap_free(swp_entry(swap, offset));
kfree(ext);
}
}
int swsusp_swap_in_use(void)
{
return (swsusp_extents.rb_node != NULL);
}
/*
* General things
*/
static unsigned short root_swap = 0xffff;
static struct block_device *hib_resume_bdev;
struct hib_bio_batch {
atomic_t count;
wait_queue_head_t wait;
int error;
};
static void hib_init_batch(struct hib_bio_batch *hb)
{
atomic_set(&hb->count, 0);
init_waitqueue_head(&hb->wait);
hb->error = 0;
}
static void hib_end_io(struct bio *bio, int error)
{
struct hib_bio_batch *hb = bio->bi_private;
const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct page *page = bio->bi_io_vec[0].bv_page;
if (!uptodate || error) {
printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector);
if (!error)
error = -EIO;
}
if (bio_data_dir(bio) == WRITE)
put_page(page);
if (error && !hb->error)
hb->error = error;
if (atomic_dec_and_test(&hb->count))
wake_up(&hb->wait);
bio_put(bio);
}
static int hib_submit_io(int rw, pgoff_t page_off, void *addr,
struct hib_bio_batch *hb)
{
struct page *page = virt_to_page(addr);
struct bio *bio;
int error = 0;
bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio->bi_bdev = hib_resume_bdev;
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
printk(KERN_ERR "PM: Adding page to bio failed at %llu\n",
(unsigned long long)bio->bi_iter.bi_sector);
bio_put(bio);
return -EFAULT;
}
if (hb) {
bio->bi_end_io = hib_end_io;
bio->bi_private = hb;
atomic_inc(&hb->count);
submit_bio(rw, bio);
} else {
error = submit_bio_wait(rw, bio);
bio_put(bio);
}
return error;
}
static int hib_wait_io(struct hib_bio_batch *hb)
{
wait_event(hb->wait, atomic_read(&hb->count) == 0);
return hb->error;
}
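/*
* Usage pattern for the batch (a sketch of what save_image() below does;
* error handling elided):
*
*	struct hib_bio_batch hb;
*
*	hib_init_batch(&hb);
*	// submit any number of asynchronous writes; each submission
*	// bumps hb.count and hib_end_io() drops it on completion
*	error = hib_submit_io(WRITE_SYNC, offset, addr, &hb);
*	...
*	error = hib_wait_io(&hb);	// sleeps until hb.count reaches 0
*/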
/*
* Saving part
*/
static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags)
{
int error;
hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) ||
!memcmp("SWAPSPACE2",swsusp_header->sig, 10)) {
memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10);
memcpy(swsusp_header->sig, HIBERNATE_SIG, 10);
swsusp_header->image = handle->first_sector;
swsusp_header->flags = flags;
if (flags & SF_CRC32_MODE)
swsusp_header->crc32 = handle->crc32;
error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
swsusp_header, NULL);
} else {
printk(KERN_ERR "PM: Swap header not found!\n");
error = -ENODEV;
}
return error;
}
/**
* swsusp_swap_check - check if the resume device is a swap device
* and get its index (if so)
*
* This is called before saving image
*/
static int swsusp_swap_check(void)
{
int res;
res = swap_type_of(swsusp_resume_device, swsusp_resume_block,
&hib_resume_bdev);
if (res < 0)
return res;
root_swap = res;
res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL);
if (res)
return res;
res = set_blocksize(hib_resume_bdev, PAGE_SIZE);
if (res < 0)
blkdev_put(hib_resume_bdev, FMODE_WRITE);
return res;
}
/**
* write_page - Write one page to given swap location.
* @buf: Address we're writing.
* @offset: Offset of the swap page we're writing to.
* @hb: bio completion batch
*/
static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb)
{
void *src;
int ret;
if (!offset)
return -ENOSPC;
if (hb) {
src = (void *)__get_free_page(__GFP_WAIT | __GFP_NOWARN |
__GFP_NORETRY);
if (src) {
copy_page(src, buf);
} else {
ret = hib_wait_io(hb); /* Free pages */
if (ret)
return ret;
src = (void *)__get_free_page(__GFP_WAIT |
__GFP_NOWARN |
__GFP_NORETRY);
if (src) {
copy_page(src, buf);
} else {
WARN_ON_ONCE(1);
hb = NULL; /* Go synchronous */
src = buf;
}
}
} else {
src = buf;
}
return hib_submit_io(WRITE_SYNC, offset, src, hb);
}
static void release_swap_writer(struct swap_map_handle *handle)
{
if (handle->cur)
free_page((unsigned long)handle->cur);
handle->cur = NULL;
}
static int get_swap_writer(struct swap_map_handle *handle)
{
int ret;
ret = swsusp_swap_check();
if (ret) {
if (ret != -ENOSPC)
printk(KERN_ERR "PM: Cannot find swap device, try "
"swapon -a.\n");
return ret;
}
handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL);
if (!handle->cur) {
ret = -ENOMEM;
goto err_close;
}
handle->cur_swap = alloc_swapdev_block(root_swap);
if (!handle->cur_swap) {
ret = -ENOSPC;
goto err_rel;
}
handle->k = 0;
handle->reqd_free_pages = reqd_free_pages();
handle->first_sector = handle->cur_swap;
return 0;
err_rel:
release_swap_writer(handle);
err_close:
swsusp_close(FMODE_WRITE);
return ret;
}
static int swap_write_page(struct swap_map_handle *handle, void *buf,
struct hib_bio_batch *hb)
{
int error = 0;
sector_t offset;
if (!handle->cur)
return -EINVAL;
offset = alloc_swapdev_block(root_swap);
error = write_page(buf, offset, hb);
if (error)
return error;
handle->cur->entries[handle->k++] = offset;
if (handle->k >= MAP_PAGE_ENTRIES) {
offset = alloc_swapdev_block(root_swap);
if (!offset)
return -ENOSPC;
handle->cur->next_swap = offset;
error = write_page(handle->cur, handle->cur_swap, hb);
if (error)
goto out;
clear_page(handle->cur);
handle->cur_swap = offset;
handle->k = 0;
if (hb && low_free_pages() <= handle->reqd_free_pages) {
error = hib_wait_io(hb);
if (error)
goto out;
/*
* Recalculate the number of required free pages, to
* make sure we never take more than half.
*/
handle->reqd_free_pages = reqd_free_pages();
}
}
out:
return error;
}
static int flush_swap_writer(struct swap_map_handle *handle)
{
if (handle->cur && handle->cur_swap)
return write_page(handle->cur, handle->cur_swap, NULL);
else
return -EINVAL;
}
static int swap_writer_finish(struct swap_map_handle *handle,
unsigned int flags, int error)
{
if (!error) {
flush_swap_writer(handle);
printk(KERN_INFO "PM: S");
error = mark_swapfiles(handle, flags);
printk("|\n");
}
if (error)
free_all_swap_pages(root_swap);
release_swap_writer(handle);
swsusp_close(FMODE_WRITE);
return error;
}
/* We need to remember how much compressed data we need to read. */
#define LZO_HEADER sizeof(size_t)
/* Number of pages/bytes we'll compress at one time. */
#define LZO_UNC_PAGES 32
#define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE)
/* Number of pages/bytes we need for compressed data (worst case). */
#define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \
LZO_HEADER, PAGE_SIZE)
#define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE)
/* Maximum number of threads for compression/decompression. */
#define LZO_THREADS 3
/* Minimum/maximum number of pages for read buffering. */
#define LZO_MIN_RD_PAGES 1024
#define LZO_MAX_RD_PAGES 8192
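/*
* Worked sizing (assuming 4 KiB pages and sizeof(size_t) == 8):
* LZO_UNC_SIZE = 32 * 4096 = 131072 bytes. With the worst-case bound
* from <linux/lzo.h>, lzo1x_worst_compress(x) = x + x/16 + 64 + 3, the
* compressed buffer must hold 131072 + 8192 + 67 + 8 (header) = 139339
* bytes, so LZO_CMP_PAGES = DIV_ROUND_UP(139339, 4096) = 35 pages and
* LZO_CMP_SIZE = 143360 bytes.
*/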
/**
* save_image - save the suspend image data
*/
static int save_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
{
unsigned int m;
int ret;
int nr_pages;
int err2;
struct hib_bio_batch hb;
ktime_t start;
ktime_t stop;
hib_init_batch(&hb);
printk(KERN_INFO "PM: Saving image data pages (%u pages)...\n",
nr_to_write);
m = nr_to_write / 10;
if (!m)
m = 1;
nr_pages = 0;
start = ktime_get();
while (1) {
ret = snapshot_read_next(snapshot);
if (ret <= 0)
break;
ret = swap_write_page(handle, data_of(*snapshot), &hb);
if (ret)
break;
if (!(nr_pages % m))
printk(KERN_INFO "PM: Image saving progress: %3d%%\n",
nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_io(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
if (!ret)
printk(KERN_INFO "PM: Image saving done.\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
return ret;
}
/**
* Structure used for CRC32.
*/
struct crc_data {
struct task_struct *thr; /* thread */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
unsigned run_threads; /* nr current threads */
wait_queue_head_t go; /* start crc update */
wait_queue_head_t done; /* crc update done */
u32 *crc32; /* points to handle's crc32 */
size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */
unsigned char *unc[LZO_THREADS]; /* uncompressed data */
};
/**
* CRC32 update function that runs in its own thread.
*/
static int crc32_threadfn(void *data)
{
struct crc_data *d = data;
unsigned i;
while (1) {
wait_event(d->go, atomic_read(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
atomic_set(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
for (i = 0; i < d->run_threads; i++)
*d->crc32 = crc32_le(*d->crc32,
d->unc[i], *d->unc_len[i]);
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
/**
* Structure used for LZO data compression.
*/
struct cmp_data {
struct task_struct *thr; /* thread */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
wait_queue_head_t go; /* start compression */
wait_queue_head_t done; /* compression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */
};
/**
* Compression function that runs in its own thread.
*/
static int lzo_compress_threadfn(void *data)
{
struct cmp_data *d = data;
while (1) {
wait_event(d->go, atomic_read(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
atomic_set(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
d->ret = lzo1x_1_compress(d->unc, d->unc_len,
d->cmp + LZO_HEADER, &d->cmp_len,
d->wrk);
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
/**
* save_image_lzo - Save the suspend image data compressed with LZO.
* @handle: Swap map handle to use for saving the image.
* @snapshot: Image to read data from.
* @nr_to_write: Number of pages to save.
*/
static int save_image_lzo(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_write)
{
unsigned int m;
int ret = 0;
int nr_pages;
int err2;
struct hib_bio_batch hb;
ktime_t start;
ktime_t stop;
size_t off;
unsigned thr, run_threads, nr_threads;
unsigned char *page = NULL;
struct cmp_data *data = NULL;
struct crc_data *crc = NULL;
hib_init_batch(&hb);
/*
* We'll limit the number of threads for compression to limit memory
* footprint.
*/
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
if (!page) {
printk(KERN_ERR "PM: Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
data = vmalloc(sizeof(*data) * nr_threads);
if (!data) {
printk(KERN_ERR "PM: Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
for (thr = 0; thr < nr_threads; thr++)
memset(&data[thr], 0, offsetof(struct cmp_data, go));
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
printk(KERN_ERR "PM: Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
memset(crc, 0, offsetof(struct crc_data, go));
/*
* Start the compression threads.
*/
for (thr = 0; thr < nr_threads; thr++) {
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
data[thr].thr = kthread_run(lzo_compress_threadfn,
&data[thr],
"image_compress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
printk(KERN_ERR
"PM: Cannot start compression threads\n");
ret = -ENOMEM;
goto out_clean;
}
}
/*
* Start the CRC32 thread.
*/
init_waitqueue_head(&crc->go);
init_waitqueue_head(&crc->done);
handle->crc32 = 0;
crc->crc32 = &handle->crc32;
for (thr = 0; thr < nr_threads; thr++) {
crc->unc[thr] = data[thr].unc;
crc->unc_len[thr] = &data[thr].unc_len;
}
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
ret = -ENOMEM;
goto out_clean;
}
/*
* Adjust the number of required free pages after all allocations have
* been done. We don't want to run out of pages when writing.
*/
handle->reqd_free_pages = reqd_free_pages();
printk(KERN_INFO
"PM: Using %u thread(s) for compression.\n"
"PM: Compressing and saving image data (%u pages)...\n",
nr_threads, nr_to_write);
m = nr_to_write / 10;
if (!m)
m = 1;
nr_pages = 0;
start = ktime_get();
for (;;) {
for (thr = 0; thr < nr_threads; thr++) {
for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) {
ret = snapshot_read_next(snapshot);
if (ret < 0)
goto out_finish;
if (!ret)
break;
memcpy(data[thr].unc + off,
data_of(*snapshot), PAGE_SIZE);
if (!(nr_pages % m))
printk(KERN_INFO
"PM: Image saving progress: "
"%3d%%\n",
nr_pages / m * 10);
nr_pages++;
}
if (!off)
break;
data[thr].unc_len = off;
atomic_set(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
if (!thr)
break;
crc->run_threads = thr;
atomic_set(&crc->ready, 1);
wake_up(&crc->go);
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
atomic_read(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
if (ret < 0) {
printk(KERN_ERR "PM: LZO compression failed\n");
goto out_finish;
}
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(data[thr].unc_len))) {
printk(KERN_ERR
"PM: Invalid LZO compressed length\n");
ret = -1;
goto out_finish;
}
*(size_t *)data[thr].cmp = data[thr].cmp_len;
/*
* Given we are writing one page at a time to disk, we
* copy that much from the buffer, although the last
* bit will likely be smaller than a full page. This is
* OK - we saved the length of the compressed data, so
* any garbage at the end will be discarded when we
* read it.
*/
for (off = 0;
off < LZO_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(page, data[thr].cmp + off, PAGE_SIZE);
ret = swap_write_page(handle, page, &hb);
if (ret)
goto out_finish;
}
}
wait_event(crc->done, atomic_read(&crc->stop));
atomic_set(&crc->stop, 0);
}
out_finish:
err2 = hib_wait_io(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
if (!ret)
printk(KERN_INFO "PM: Image saving done.\n");
swsusp_show_speed(start, stop, nr_to_write, "Wrote");
out_clean:
if (crc) {
if (crc->thr)
kthread_stop(crc->thr);
kfree(crc);
}
if (data) {
for (thr = 0; thr < nr_threads; thr++)
if (data[thr].thr)
kthread_stop(data[thr].thr);
vfree(data);
}
if (page)
free_page((unsigned long)page);
return ret;
}
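/*
 * For reference, the per-block on-disk layout produced above is:
 *
 *	byte 0 .. LZO_HEADER-1		cmp_len, stored as a size_t
 *	byte LZO_HEADER onward		cmp_len bytes of LZO1X data
 *	remainder of the last page	padding/garbage, discarded on
 *					load because cmp_len is trusted
 *
 * load_image_lzo() below computes DIV_ROUND_UP(cmp_len + LZO_HEADER,
 * PAGE_SIZE) to know how many pages make up one block.
 */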
/**
* enough_swap - Make sure we have enough swap to save the image.
*
* Returns TRUE or FALSE after checking the total amount of swap
* space available from the resume partition.
*/
static int enough_swap(unsigned int nr_pages, unsigned int flags)
{
unsigned int free_swap = count_swap_pages(root_swap, 1);
unsigned int required;
pr_debug("PM: Free swap pages: %u\n", free_swap);
required = PAGES_FOR_IO + nr_pages;
return free_swap > required;
}
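/*
 * Worked example: on 4 KiB pages PAGES_FOR_IO works out to 1024 pages
 * (4 MiB of I/O headroom, per its definition in <linux/suspend.h>), so
 * saving a 100000-page image needs strictly more than 101024 free swap
 * pages for this check to pass.
 */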
/**
* swsusp_write - Write entire image and metadata.
* @flags: flags to pass to the "boot" kernel in the image header
*
* It is important _NOT_ to umount filesystems at this point. We want
* them synced (in case something goes wrong) but we DO not want to mark
* filesystem clean: it is not. (And it does not matter, if we resume
* correctly, we'll mark system clean, anyway.)
*/
int swsusp_write(unsigned int flags)
{
struct swap_map_handle handle;
struct snapshot_handle snapshot;
struct swsusp_info *header;
unsigned long pages;
int error;
pages = snapshot_get_image_size();
error = get_swap_writer(&handle);
if (error) {
printk(KERN_ERR "PM: Cannot get swap writer\n");
return error;
}
if (flags & SF_NOCOMPRESS_MODE) {
if (!enough_swap(pages, flags)) {
printk(KERN_ERR "PM: Not enough free swap\n");
error = -ENOSPC;
goto out_finish;
}
}
memset(&snapshot, 0, sizeof(struct snapshot_handle));
error = snapshot_read_next(&snapshot);
if (error < PAGE_SIZE) {
if (error >= 0)
error = -EFAULT;
goto out_finish;
}
header = (struct swsusp_info *)data_of(snapshot);
error = swap_write_page(&handle, header, NULL);
if (!error) {
error = (flags & SF_NOCOMPRESS_MODE) ?
save_image(&handle, &snapshot, pages - 1) :
save_image_lzo(&handle, &snapshot, pages - 1);
}
out_finish:
error = swap_writer_finish(&handle, flags, error);
return error;
}
/**
* The following functions allow us to read data using a swap map
* in a file-like way.
*/
static void release_swap_reader(struct swap_map_handle *handle)
{
struct swap_map_page_list *tmp;
while (handle->maps) {
if (handle->maps->map)
free_page((unsigned long)handle->maps->map);
tmp = handle->maps;
handle->maps = handle->maps->next;
kfree(tmp);
}
handle->cur = NULL;
}
static int get_swap_reader(struct swap_map_handle *handle,
unsigned int *flags_p)
{
int error;
struct swap_map_page_list *tmp, *last;
sector_t offset;
*flags_p = swsusp_header->flags;
if (!swsusp_header->image) /* how can this happen? */
return -EINVAL;
handle->cur = NULL;
last = handle->maps = NULL;
offset = swsusp_header->image;
while (offset) {
tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL);
if (!tmp) {
release_swap_reader(handle);
return -ENOMEM;
}
memset(tmp, 0, sizeof(*tmp));
if (!handle->maps)
handle->maps = tmp;
if (last)
last->next = tmp;
last = tmp;
tmp->map = (struct swap_map_page *)
__get_free_page(__GFP_WAIT | __GFP_HIGH);
if (!tmp->map) {
release_swap_reader(handle);
return -ENOMEM;
}
error = hib_submit_io(READ_SYNC, offset, tmp->map, NULL);
if (error) {
release_swap_reader(handle);
return error;
}
offset = tmp->map->next_swap;
}
handle->k = 0;
handle->cur = handle->maps->map;
return 0;
}
static int swap_read_page(struct swap_map_handle *handle, void *buf,
struct hib_bio_batch *hb)
{
sector_t offset;
int error;
struct swap_map_page_list *tmp;
if (!handle->cur)
return -EINVAL;
offset = handle->cur->entries[handle->k];
if (!offset)
return -EFAULT;
error = hib_submit_io(READ_SYNC, offset, buf, hb);
if (error)
return error;
if (++handle->k >= MAP_PAGE_ENTRIES) {
handle->k = 0;
free_page((unsigned long)handle->maps->map);
tmp = handle->maps;
handle->maps = handle->maps->next;
kfree(tmp);
if (!handle->maps)
release_swap_reader(handle);
else
handle->cur = handle->maps->map;
}
return error;
}
static int swap_reader_finish(struct swap_map_handle *handle)
{
release_swap_reader(handle);
return 0;
}
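/*
 * A minimal usage sketch of the reader trio above (the function name is
 * hypothetical and error handling is trimmed); load_image() below is
 * the real, batched version of this loop:
 */
#if 0	/* illustrative only */
static int example_read_pages(void *buf, unsigned int nr_pages)
{
	struct swap_map_handle handle;
	unsigned int flags, i;
	int error;

	error = get_swap_reader(&handle, &flags);
	if (error)
		return error;
	for (i = 0; i < nr_pages && !error; i++)
		error = swap_read_page(&handle, buf + i * PAGE_SIZE,
				       NULL /* NULL hb => synchronous I/O */);
	swap_reader_finish(&handle);
	return error;
}
#endif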
/**
* load_image - Load the image using the swap map handle.
* @handle: Swap map handle to use for loading data.
* @snapshot: Image to copy uncompressed data into.
* @nr_to_read: Number of pages to load.
*/
static int load_image(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
{
unsigned int m;
int ret = 0;
ktime_t start;
ktime_t stop;
struct hib_bio_batch hb;
int err2;
unsigned nr_pages;
hib_init_batch(&hb);
printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
nr_to_read);
m = nr_to_read / 10;
if (!m)
m = 1;
nr_pages = 0;
start = ktime_get();
for ( ; ; ) {
ret = snapshot_write_next(snapshot);
if (ret <= 0)
break;
ret = swap_read_page(handle, data_of(*snapshot), &hb);
if (ret)
break;
if (snapshot->sync_read)
ret = hib_wait_io(&hb);
if (ret)
break;
if (!(nr_pages % m))
printk(KERN_INFO "PM: Image loading progress: %3d%%\n",
nr_pages / m * 10);
nr_pages++;
}
err2 = hib_wait_io(&hb);
stop = ktime_get();
if (!ret)
ret = err2;
if (!ret) {
printk(KERN_INFO "PM: Image loading done.\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
}
swsusp_show_speed(start, stop, nr_to_read, "Read");
return ret;
}
/**
* Structure used for LZO data decompression.
*/
struct dec_data {
struct task_struct *thr; /* thread */
atomic_t ready; /* ready to start flag */
atomic_t stop; /* ready to stop flag */
int ret; /* return code */
wait_queue_head_t go; /* start decompression */
wait_queue_head_t done; /* decompression done */
size_t unc_len; /* uncompressed length */
size_t cmp_len; /* compressed length */
unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */
unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */
};
/**
* Decompression function that runs in its own thread.
*/
static int lzo_decompress_threadfn(void *data)
{
struct dec_data *d = data;
while (1) {
wait_event(d->go, atomic_read(&d->ready) ||
kthread_should_stop());
if (kthread_should_stop()) {
d->thr = NULL;
d->ret = -1;
atomic_set(&d->stop, 1);
wake_up(&d->done);
break;
}
atomic_set(&d->ready, 0);
d->unc_len = LZO_UNC_SIZE;
d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
d->unc, &d->unc_len);
atomic_set(&d->stop, 1);
wake_up(&d->done);
}
return 0;
}
/**
* load_image_lzo - Load compressed image data and decompress them with LZO.
* @handle: Swap map handle to use for loading data.
* @snapshot: Image to copy uncompressed data into.
* @nr_to_read: Number of pages to load.
*/
static int load_image_lzo(struct swap_map_handle *handle,
struct snapshot_handle *snapshot,
unsigned int nr_to_read)
{
unsigned int m;
int ret = 0;
int eof = 0;
struct hib_bio_batch hb;
ktime_t start;
ktime_t stop;
unsigned nr_pages;
size_t off;
unsigned i, thr, run_threads, nr_threads;
unsigned ring = 0, pg = 0, ring_size = 0,
have = 0, want, need, asked = 0;
unsigned long read_pages = 0;
unsigned char **page = NULL;
struct dec_data *data = NULL;
struct crc_data *crc = NULL;
hib_init_batch(&hb);
/*
* Limit the number of decompression threads to bound the memory
* footprint.
*/
nr_threads = num_online_cpus() - 1;
nr_threads = clamp_val(nr_threads, 1, LZO_THREADS);
page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES);
if (!page) {
printk(KERN_ERR "PM: Failed to allocate LZO page\n");
ret = -ENOMEM;
goto out_clean;
}
data = vmalloc(sizeof(*data) * nr_threads);
if (!data) {
printk(KERN_ERR "PM: Failed to allocate LZO data\n");
ret = -ENOMEM;
goto out_clean;
}
for (thr = 0; thr < nr_threads; thr++)
memset(&data[thr], 0, offsetof(struct dec_data, go));
crc = kmalloc(sizeof(*crc), GFP_KERNEL);
if (!crc) {
printk(KERN_ERR "PM: Failed to allocate crc\n");
ret = -ENOMEM;
goto out_clean;
}
memset(crc, 0, offsetof(struct crc_data, go));
/*
* Start the decompression threads.
*/
for (thr = 0; thr < nr_threads; thr++) {
init_waitqueue_head(&data[thr].go);
init_waitqueue_head(&data[thr].done);
data[thr].thr = kthread_run(lzo_decompress_threadfn,
&data[thr],
"image_decompress/%u", thr);
if (IS_ERR(data[thr].thr)) {
data[thr].thr = NULL;
printk(KERN_ERR
"PM: Cannot start decompression threads\n");
ret = -ENOMEM;
goto out_clean;
}
}
/*
* Start the CRC32 thread.
*/
init_waitqueue_head(&crc->go);
init_waitqueue_head(&crc->done);
handle->crc32 = 0;
crc->crc32 = &handle->crc32;
for (thr = 0; thr < nr_threads; thr++) {
crc->unc[thr] = data[thr].unc;
crc->unc_len[thr] = &data[thr].unc_len;
}
crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32");
if (IS_ERR(crc->thr)) {
crc->thr = NULL;
printk(KERN_ERR "PM: Cannot start CRC32 thread\n");
ret = -ENOMEM;
goto out_clean;
}
/*
* Set the number of pages for read buffering.
* This is complete guesswork, because we'll only know the real
* picture once prepare_image() is called, which is much later on
* during the image load phase. We'll assume the worst case and
* say that none of the image pages are from high memory.
*/
if (low_free_pages() > snapshot_get_image_size())
read_pages = (low_free_pages() - snapshot_get_image_size()) / 2;
read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES);
for (i = 0; i < read_pages; i++) {
page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ?
__GFP_WAIT | __GFP_HIGH :
__GFP_WAIT | __GFP_NOWARN |
__GFP_NORETRY);
if (!page[i]) {
if (i < LZO_CMP_PAGES) {
ring_size = i;
printk(KERN_ERR
"PM: Failed to allocate LZO pages\n");
ret = -ENOMEM;
goto out_clean;
} else {
break;
}
}
}
want = ring_size = i;
printk(KERN_INFO
"PM: Using %u thread(s) for decompression.\n"
"PM: Loading and decompressing image data (%u pages)...\n",
nr_threads, nr_to_read);
m = nr_to_read / 10;
if (!m)
m = 1;
nr_pages = 0;
start = ktime_get();
ret = snapshot_write_next(snapshot);
if (ret <= 0)
goto out_finish;
for (;;) {
for (i = 0; !eof && i < want; i++) {
ret = swap_read_page(handle, page[ring], &hb);
if (ret) {
/*
* On real read error, finish. On end of data,
* set EOF flag and just exit the read loop.
*/
if (handle->cur &&
handle->cur->entries[handle->k]) {
goto out_finish;
} else {
eof = 1;
break;
}
}
if (++ring >= ring_size)
ring = 0;
}
asked += i;
want -= i;
/*
* We are out of data, wait for some more.
*/
if (!have) {
if (!asked)
break;
ret = hib_wait_io(&hb);
if (ret)
goto out_finish;
have += asked;
asked = 0;
if (eof)
eof = 2;
}
if (crc->run_threads) {
wait_event(crc->done, atomic_read(&crc->stop));
atomic_set(&crc->stop, 0);
crc->run_threads = 0;
}
for (thr = 0; have && thr < nr_threads; thr++) {
data[thr].cmp_len = *(size_t *)page[pg];
if (unlikely(!data[thr].cmp_len ||
data[thr].cmp_len >
lzo1x_worst_compress(LZO_UNC_SIZE))) {
printk(KERN_ERR
"PM: Invalid LZO compressed length\n");
ret = -1;
goto out_finish;
}
need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER,
PAGE_SIZE);
if (need > have) {
if (eof > 1) {
ret = -1;
goto out_finish;
}
break;
}
for (off = 0;
off < LZO_HEADER + data[thr].cmp_len;
off += PAGE_SIZE) {
memcpy(data[thr].cmp + off,
page[pg], PAGE_SIZE);
have--;
want++;
if (++pg >= ring_size)
pg = 0;
}
atomic_set(&data[thr].ready, 1);
wake_up(&data[thr].go);
}
/*
* Wait for more data while we are decompressing.
*/
if (have < LZO_CMP_PAGES && asked) {
ret = hib_wait_io(&hb);
if (ret)
goto out_finish;
have += asked;
asked = 0;
if (eof)
eof = 2;
}
for (run_threads = thr, thr = 0; thr < run_threads; thr++) {
wait_event(data[thr].done,
atomic_read(&data[thr].stop));
atomic_set(&data[thr].stop, 0);
ret = data[thr].ret;
if (ret < 0) {
printk(KERN_ERR
"PM: LZO decompression failed\n");
goto out_finish;
}
if (unlikely(!data[thr].unc_len ||
data[thr].unc_len > LZO_UNC_SIZE ||
data[thr].unc_len & (PAGE_SIZE - 1))) {
printk(KERN_ERR
"PM: Invalid LZO uncompressed length\n");
ret = -1;
goto out_finish;
}
for (off = 0;
off < data[thr].unc_len; off += PAGE_SIZE) {
memcpy(data_of(*snapshot),
data[thr].unc + off, PAGE_SIZE);
if (!(nr_pages % m))
printk(KERN_INFO
"PM: Image loading progress: "
"%3d%%\n",
nr_pages / m * 10);
nr_pages++;
ret = snapshot_write_next(snapshot);
if (ret <= 0) {
crc->run_threads = thr + 1;
atomic_set(&crc->ready, 1);
wake_up(&crc->go);
goto out_finish;
}
}
}
crc->run_threads = thr;
atomic_set(&crc->ready, 1);
wake_up(&crc->go);
}
out_finish:
if (crc->run_threads) {
wait_event(crc->done, atomic_read(&crc->stop));
atomic_set(&crc->stop, 0);
}
stop = ktime_get();
if (!ret) {
printk(KERN_INFO "PM: Image loading done.\n");
snapshot_write_finalize(snapshot);
if (!snapshot_image_loaded(snapshot))
ret = -ENODATA;
if (!ret) {
if (swsusp_header->flags & SF_CRC32_MODE) {
if (handle->crc32 != swsusp_header->crc32) {
printk(KERN_ERR
"PM: Invalid image CRC32!\n");
ret = -ENODATA;
}
}
}
}
swsusp_show_speed(start, stop, nr_to_read, "Read");
out_clean:
for (i = 0; i < ring_size; i++)
free_page((unsigned long)page[i]);
if (crc) {
if (crc->thr)
kthread_stop(crc->thr);
kfree(crc);
}
if (data) {
for (thr = 0; thr < nr_threads; thr++)
if (data[thr].thr)
kthread_stop(data[thr].thr);
vfree(data);
}
vfree(page);
return ret;
}
/**
* swsusp_read - read the hibernation image.
* @flags_p: flags passed by the "frozen" kernel in the image header should
* be written into this memory location
*/
int swsusp_read(unsigned int *flags_p)
{
int error;
struct swap_map_handle handle;
struct snapshot_handle snapshot;
struct swsusp_info *header;
memset(&snapshot, 0, sizeof(struct snapshot_handle));
error = snapshot_write_next(&snapshot);
if (error < PAGE_SIZE)
return error < 0 ? error : -EFAULT;
header = (struct swsusp_info *)data_of(snapshot);
error = get_swap_reader(&handle, flags_p);
if (error)
goto end;
error = swap_read_page(&handle, header, NULL);
if (!error) {
error = (*flags_p & SF_NOCOMPRESS_MODE) ?
load_image(&handle, &snapshot, header->pages - 1) :
load_image_lzo(&handle, &snapshot, header->pages - 1);
}
swap_reader_finish(&handle);
end:
if (!error)
pr_debug("PM: Image successfully loaded\n");
else
pr_debug("PM: Error %d resuming\n", error);
return error;
}
/**
* swsusp_check - Check for swsusp signature in the resume device
*/
int swsusp_check(void)
{
int error;
hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device,
FMODE_READ, NULL);
if (!IS_ERR(hib_resume_bdev)) {
set_blocksize(hib_resume_bdev, PAGE_SIZE);
clear_page(swsusp_header);
error = hib_submit_io(READ_SYNC, swsusp_resume_block,
swsusp_header, NULL);
if (error)
goto put;
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
/* Reset swap signature now */
error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
swsusp_header, NULL);
} else {
error = -EINVAL;
}
put:
if (error)
blkdev_put(hib_resume_bdev, FMODE_READ);
else
pr_debug("PM: Image signature found, resuming\n");
} else {
error = PTR_ERR(hib_resume_bdev);
}
if (error)
pr_debug("PM: Image not found (code %d)\n", error);
return error;
}
/**
* swsusp_close - close swap device.
*/
void swsusp_close(fmode_t mode)
{
if (IS_ERR(hib_resume_bdev)) {
pr_debug("PM: Image device not initialised\n");
return;
}
blkdev_put(hib_resume_bdev, mode);
}
/**
* swsusp_unmark - Unmark swsusp signature in the resume device
*/
#ifdef CONFIG_SUSPEND
int swsusp_unmark(void)
{
int error;
hib_submit_io(READ_SYNC, swsusp_resume_block, swsusp_header, NULL);
if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) {
memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10);
error = hib_submit_io(WRITE_SYNC, swsusp_resume_block,
swsusp_header, NULL);
} else {
printk(KERN_ERR "PM: Cannot find swsusp signature!\n");
error = -ENODEV;
}
/*
* We just returned from suspend, we don't need the image any more.
*/
free_all_swap_pages(root_swap);
return error;
}
#endif
static int swsusp_header_init(void)
{
swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL);
if (!swsusp_header)
panic("Could not allocate memory for swsusp_header\n");
return 0;
}
core_initcall(swsusp_header_init);
kernel/power/user.c
/*
* linux/kernel/power/user.c
*
* This file provides the user space interface for software suspend/resume.
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* This file is released under the GPLv2.
*
*/
#include <linux/suspend.h>
#include <linux/syscalls.h>
#include <linux/reboot.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pm.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/console.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include "power.h"
#define SNAPSHOT_MINOR 231
static struct snapshot_data {
struct snapshot_handle handle;
int swap;
int mode;
bool frozen;
bool ready;
bool platform_support;
bool free_bitmaps;
} snapshot_state;
atomic_t snapshot_device_available = ATOMIC_INIT(1);
static int snapshot_open(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
int error;
if (!hibernation_available())
return -EPERM;
lock_system_sleep();
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
error = -EBUSY;
goto Unlock;
}
if ((filp->f_flags & O_ACCMODE) == O_RDWR) {
atomic_inc(&snapshot_device_available);
error = -ENOSYS;
goto Unlock;
}
nonseekable_open(inode, filp);
data = &snapshot_state;
filp->private_data = data;
memset(&data->handle, 0, sizeof(struct snapshot_handle));
if ((filp->f_flags & O_ACCMODE) == O_RDONLY) {
/* Hibernating. The image device should be accessible. */
data->swap = swsusp_resume_device ?
swap_type_of(swsusp_resume_device, 0, NULL) : -1;
data->mode = O_RDONLY;
data->free_bitmaps = false;
error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
if (error)
pm_notifier_call_chain(PM_POST_HIBERNATION);
} else {
/*
* Resuming. We may need to wait for the image device to
* appear.
*/
wait_for_device_probe();
data->swap = -1;
data->mode = O_WRONLY;
error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
if (!error) {
error = create_basic_memory_bitmaps();
data->free_bitmaps = !error;
}
if (error)
pm_notifier_call_chain(PM_POST_RESTORE);
}
if (error)
atomic_inc(&snapshot_device_available);
data->frozen = false;
data->ready = false;
data->platform_support = false;
Unlock:
unlock_system_sleep();
return error;
}
static int snapshot_release(struct inode *inode, struct file *filp)
{
struct snapshot_data *data;
lock_system_sleep();
swsusp_free();
data = filp->private_data;
free_all_swap_pages(data->swap);
if (data->frozen) {
pm_restore_gfp_mask();
free_basic_memory_bitmaps();
thaw_processes();
} else if (data->free_bitmaps) {
free_basic_memory_bitmaps();
}
pm_notifier_call_chain(data->mode == O_RDONLY ?
PM_POST_HIBERNATION : PM_POST_RESTORE);
atomic_inc(&snapshot_device_available);
unlock_system_sleep();
return 0;
}
static ssize_t snapshot_read(struct file *filp, char __user *buf,
size_t count, loff_t *offp)
{
struct snapshot_data *data;
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
lock_system_sleep();
data = filp->private_data;
if (!data->ready) {
res = -ENODATA;
goto Unlock;
}
if (!pg_offp) { /* on page boundary? */
res = snapshot_read_next(&data->handle);
if (res <= 0)
goto Unlock;
} else {
res = PAGE_SIZE - pg_offp;
}
res = simple_read_from_buffer(buf, count, &pg_offp,
data_of(data->handle), res);
if (res > 0)
*offp += res;
Unlock:
unlock_system_sleep();
return res;
}
static ssize_t snapshot_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offp)
{
struct snapshot_data *data;
ssize_t res;
loff_t pg_offp = *offp & ~PAGE_MASK;
lock_system_sleep();
data = filp->private_data;
if (!pg_offp) {
res = snapshot_write_next(&data->handle);
if (res <= 0)
goto unlock;
} else {
res = PAGE_SIZE - pg_offp;
}
res = simple_write_to_buffer(data_of(data->handle), res, &pg_offp,
buf, count);
if (res > 0)
*offp += res;
unlock:
unlock_system_sleep();
return res;
}
static long snapshot_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int error = 0;
struct snapshot_data *data;
loff_t size;
sector_t offset;
if (_IOC_TYPE(cmd) != SNAPSHOT_IOC_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > SNAPSHOT_IOC_MAXNR)
return -ENOTTY;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (!mutex_trylock(&pm_mutex))
return -EBUSY;
lock_device_hotplug();
data = filp->private_data;
switch (cmd) {
case SNAPSHOT_FREEZE:
if (data->frozen)
break;
printk("Syncing filesystems ... ");
sys_sync();
printk("done.\n");
error = freeze_processes();
if (error)
break;
error = create_basic_memory_bitmaps();
if (error)
thaw_processes();
else
data->frozen = true;
break;
case SNAPSHOT_UNFREEZE:
if (!data->frozen || data->ready)
break;
pm_restore_gfp_mask();
free_basic_memory_bitmaps();
data->free_bitmaps = false;
thaw_processes();
data->frozen = false;
break;
case SNAPSHOT_CREATE_IMAGE:
if (data->mode != O_RDONLY || !data->frozen || data->ready) {
error = -EPERM;
break;
}
pm_restore_gfp_mask();
error = hibernation_snapshot(data->platform_support);
if (!error) {
error = put_user(in_suspend, (int __user *)arg);
data->ready = !freezer_test_done && !error;
freezer_test_done = false;
}
break;
case SNAPSHOT_ATOMIC_RESTORE:
snapshot_write_finalize(&data->handle);
if (data->mode != O_WRONLY || !data->frozen ||
!snapshot_image_loaded(&data->handle)) {
error = -EPERM;
break;
}
error = hibernation_restore(data->platform_support);
break;
case SNAPSHOT_FREE:
swsusp_free();
memset(&data->handle, 0, sizeof(struct snapshot_handle));
data->ready = false;
/*
* It is necessary to thaw kernel threads here, because
* SNAPSHOT_CREATE_IMAGE may be invoked directly after
* SNAPSHOT_FREE. In that case, if kernel threads were not
* thawed, the preallocation of memory carried out by
* hibernation_snapshot() might run into problems (i.e. it
* might fail or even deadlock).
*/
thaw_kernel_threads();
break;
case SNAPSHOT_PREF_IMAGE_SIZE:
image_size = arg;
break;
case SNAPSHOT_GET_IMAGE_SIZE:
if (!data->ready) {
error = -ENODATA;
break;
}
size = snapshot_get_image_size();
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
case SNAPSHOT_AVAIL_SWAP_SIZE:
size = count_swap_pages(data->swap, 1);
size <<= PAGE_SHIFT;
error = put_user(size, (loff_t __user *)arg);
break;
case SNAPSHOT_ALLOC_SWAP_PAGE:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
break;
}
offset = alloc_swapdev_block(data->swap);
if (offset) {
offset <<= PAGE_SHIFT;
error = put_user(offset, (loff_t __user *)arg);
} else {
error = -ENOSPC;
}
break;
case SNAPSHOT_FREE_SWAP_PAGES:
if (data->swap < 0 || data->swap >= MAX_SWAPFILES) {
error = -ENODEV;
break;
}
free_all_swap_pages(data->swap);
break;
case SNAPSHOT_S2RAM:
if (!data->frozen) {
error = -EPERM;
break;
}
/*
* Tasks are frozen and the notifiers have been called with
* PM_HIBERNATION_PREPARE
*/
error = suspend_devices_and_enter(PM_SUSPEND_MEM);
data->ready = false;
break;
case SNAPSHOT_PLATFORM_SUPPORT:
data->platform_support = !!arg;
break;
case SNAPSHOT_POWER_OFF:
if (data->platform_support)
error = hibernation_platform_enter();
break;
case SNAPSHOT_SET_SWAP_AREA:
if (swsusp_swap_in_use()) {
error = -EPERM;
} else {
struct resume_swap_area swap_area;
dev_t swdev;
error = copy_from_user(&swap_area, (void __user *)arg,
sizeof(struct resume_swap_area));
if (error) {
error = -EFAULT;
break;
}
/*
* User space encodes device types as two-byte values,
* so we need to recode them
*/
swdev = new_decode_dev(swap_area.dev);
if (swdev) {
offset = swap_area.offset;
data->swap = swap_type_of(swdev, offset, NULL);
if (data->swap < 0)
error = -ENODEV;
} else {
data->swap = -1;
error = -EINVAL;
}
}
break;
default:
error = -ENOTTY;
}
unlock_device_hotplug();
mutex_unlock(&pm_mutex);
return error;
}
#ifdef CONFIG_COMPAT
struct compat_resume_swap_area {
compat_loff_t offset;
u32 dev;
} __packed;
static long
snapshot_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
BUILD_BUG_ON(sizeof(loff_t) != sizeof(compat_loff_t));
switch (cmd) {
case SNAPSHOT_GET_IMAGE_SIZE:
case SNAPSHOT_AVAIL_SWAP_SIZE:
case SNAPSHOT_ALLOC_SWAP_PAGE: {
compat_loff_t __user *uoffset = compat_ptr(arg);
loff_t offset;
mm_segment_t old_fs;
int err;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = snapshot_ioctl(file, cmd, (unsigned long) &offset);
set_fs(old_fs);
if (!err && put_user(offset, uoffset))
err = -EFAULT;
return err;
}
case SNAPSHOT_CREATE_IMAGE:
return snapshot_ioctl(file, cmd,
(unsigned long) compat_ptr(arg));
case SNAPSHOT_SET_SWAP_AREA: {
struct compat_resume_swap_area __user *u_swap_area =
compat_ptr(arg);
struct resume_swap_area swap_area;
mm_segment_t old_fs;
int err;
err = get_user(swap_area.offset, &u_swap_area->offset);
err |= get_user(swap_area.dev, &u_swap_area->dev);
if (err)
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = snapshot_ioctl(file, SNAPSHOT_SET_SWAP_AREA,
(unsigned long) &swap_area);
set_fs(old_fs);
return err;
}
default:
return snapshot_ioctl(file, cmd, arg);
}
}
#endif /* CONFIG_COMPAT */
static const struct file_operations snapshot_fops = {
.open = snapshot_open,
.release = snapshot_release,
.read = snapshot_read,
.write = snapshot_write,
.llseek = no_llseek,
.unlocked_ioctl = snapshot_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = snapshot_compat_ioctl,
#endif
};
static struct miscdevice snapshot_device = {
.minor = SNAPSHOT_MINOR,
.name = "snapshot",
.fops = &snapshot_fops,
};
static int __init snapshot_device_init(void)
{
return misc_register(&snapshot_device);
};
device_initcall(snapshot_device_init);
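/*
 * A user-space sketch of driving /dev/snapshot for the image-creation
 * half of hibernation (illustrative only: the ioctl numbers come from
 * <linux/suspend_ioctls.h>, error handling is omitted, and a real tool
 * such as s2disk would write the pages it read()s to swap and use
 * SNAPSHOT_ATOMIC_RESTORE on the resume path):
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int create_image_example(void)
{
	char page[4096];
	int in_suspend = 0;
	int fd = open("/dev/snapshot", O_RDONLY);	/* hibernate mode */

	ioctl(fd, SNAPSHOT_FREEZE, 0);			/* freeze processes */
	ioctl(fd, SNAPSHOT_CREATE_IMAGE, &in_suspend);	/* atomic snapshot */
	if (in_suspend)					/* image was created */
		while (read(fd, page, sizeof(page)) > 0)
			;	/* save each page to storage here */
	ioctl(fd, SNAPSHOT_UNFREEZE, 0);
	close(fd);
	return 0;
}
#endif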
kernel/power/wakelock.c
/*
* kernel/power/wakelock.c
*
* User space wakeup sources support.
*
* Copyright (C) 2012 Rafael J. Wysocki <rjw@sisk.pl>
*
* This code is based on the analogous interface allowing user space to
* manipulate wakelocks on Android.
*/
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "power.h"
static DEFINE_MUTEX(wakelocks_lock);
struct wakelock {
char *name;
struct rb_node node;
struct wakeup_source ws;
#ifdef CONFIG_PM_WAKELOCKS_GC
struct list_head lru;
#endif
};
static struct rb_root wakelocks_tree = RB_ROOT;
ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
struct rb_node *node;
struct wakelock *wl;
char *str = buf;
char *end = buf + PAGE_SIZE;
mutex_lock(&wakelocks_lock);
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
if (wl->ws.active == show_active)
str += scnprintf(str, end - str, "%s ", wl->name);
}
if (str > buf)
str--;
str += scnprintf(str, end - str, "\n");
mutex_unlock(&wakelocks_lock);
return (str - buf);
}
#if CONFIG_PM_WAKELOCKS_LIMIT > 0
static unsigned int number_of_wakelocks;
static inline bool wakelocks_limit_exceeded(void)
{
return number_of_wakelocks > CONFIG_PM_WAKELOCKS_LIMIT;
}
static inline void increment_wakelocks_number(void)
{
number_of_wakelocks++;
}
static inline void decrement_wakelocks_number(void)
{
number_of_wakelocks--;
}
#else /* CONFIG_PM_WAKELOCKS_LIMIT = 0 */
static inline bool wakelocks_limit_exceeded(void) { return false; }
static inline void increment_wakelocks_number(void) {}
static inline void decrement_wakelocks_number(void) {}
#endif /* CONFIG_PM_WAKELOCKS_LIMIT */
#ifdef CONFIG_PM_WAKELOCKS_GC
#define WL_GC_COUNT_MAX 100
#define WL_GC_TIME_SEC 300
static LIST_HEAD(wakelocks_lru_list);
static unsigned int wakelocks_gc_count;
static inline void wakelocks_lru_add(struct wakelock *wl)
{
list_add(&wl->lru, &wakelocks_lru_list);
}
static inline void wakelocks_lru_most_recent(struct wakelock *wl)
{
list_move(&wl->lru, &wakelocks_lru_list);
}
static void wakelocks_gc(void)
{
struct wakelock *wl, *aux;
ktime_t now;
if (++wakelocks_gc_count <= WL_GC_COUNT_MAX)
return;
now = ktime_get();
list_for_each_entry_safe_reverse(wl, aux, &wakelocks_lru_list, lru) {
u64 idle_time_ns;
bool active;
spin_lock_irq(&wl->ws.lock);
idle_time_ns = ktime_to_ns(ktime_sub(now, wl->ws.last_time));
active = wl->ws.active;
spin_unlock_irq(&wl->ws.lock);
if (idle_time_ns < ((u64)WL_GC_TIME_SEC * NSEC_PER_SEC))
break;
if (!active) {
wakeup_source_remove(&wl->ws);
rb_erase(&wl->node, &wakelocks_tree);
list_del(&wl->lru);
kfree(wl->name);
kfree(wl);
decrement_wakelocks_number();
}
}
wakelocks_gc_count = 0;
}
#else /* !CONFIG_PM_WAKELOCKS_GC */
static inline void wakelocks_lru_add(struct wakelock *wl) {}
static inline void wakelocks_lru_most_recent(struct wakelock *wl) {}
static inline void wakelocks_gc(void) {}
#endif /* !CONFIG_PM_WAKELOCKS_GC */
static struct wakelock *wakelock_lookup_add(const char *name, size_t len,
bool add_if_not_found)
{
struct rb_node **node = &wakelocks_tree.rb_node;
struct rb_node *parent = *node;
struct wakelock *wl;
while (*node) {
int diff;
parent = *node;
wl = rb_entry(*node, struct wakelock, node);
diff = strncmp(name, wl->name, len);
if (diff == 0) {
if (wl->name[len])
diff = -1;
else
return wl;
}
if (diff < 0)
node = &(*node)->rb_left;
else
node = &(*node)->rb_right;
}
if (!add_if_not_found)
return ERR_PTR(-EINVAL);
if (wakelocks_limit_exceeded())
return ERR_PTR(-ENOSPC);
/* Not found, we have to add a new one. */
wl = kzalloc(sizeof(*wl), GFP_KERNEL);
if (!wl)
return ERR_PTR(-ENOMEM);
wl->name = kstrndup(name, len, GFP_KERNEL);
if (!wl->name) {
kfree(wl);
return ERR_PTR(-ENOMEM);
}
wl->ws.name = wl->name;
wakeup_source_add(&wl->ws);
rb_link_node(&wl->node, parent, node);
rb_insert_color(&wl->node, &wakelocks_tree);
wakelocks_lru_add(wl);
increment_wakelocks_number();
return wl;
}
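/*
 * Example of the lookup semantics above: looking up "foo" (len == 3)
 * against a stored "foobar" gives strncmp() == 0, but wl->name[3] is
 * 'b', so diff is forced to -1 and the walk continues down the left
 * subtree instead of returning the wrong wakelock.
 */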
int pm_wake_lock(const char *buf)
{
const char *str = buf;
struct wakelock *wl;
u64 timeout_ns = 0;
size_t len;
int ret = 0;
if (!capable(CAP_BLOCK_SUSPEND))
return -EPERM;
while (*str && !isspace(*str))
str++;
len = str - buf;
if (!len)
return -EINVAL;
if (*str && *str != '\n') {
/* Find out if there's a valid timeout string appended. */
ret = kstrtou64(skip_spaces(str), 10, &timeout_ns);
if (ret)
return -EINVAL;
}
mutex_lock(&wakelocks_lock);
wl = wakelock_lookup_add(buf, len, true);
if (IS_ERR(wl)) {
ret = PTR_ERR(wl);
goto out;
}
if (timeout_ns) {
u64 timeout_ms = timeout_ns + NSEC_PER_MSEC - 1;
do_div(timeout_ms, NSEC_PER_MSEC);
__pm_wakeup_event(&wl->ws, timeout_ms);
} else {
__pm_stay_awake(&wl->ws);
}
wakelocks_lru_most_recent(wl);
out:
mutex_unlock(&wakelocks_lock);
return ret;
}
int pm_wake_unlock(const char *buf)
{
struct wakelock *wl;
size_t len;
int ret = 0;
if (!capable(CAP_BLOCK_SUSPEND))
return -EPERM;
len = strlen(buf);
if (!len)
return -EINVAL;
if (buf[len-1] == '\n')
len--;
if (!len)
return -EINVAL;
mutex_lock(&wakelocks_lock);
wl = wakelock_lookup_add(buf, len, false);
if (IS_ERR(wl)) {
ret = PTR_ERR(wl);
goto out;
}
__pm_relax(&wl->ws);
wakelocks_lru_most_recent(wl);
wakelocks_gc();
out:
mutex_unlock(&wakelocks_lock);
return ret;
}
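/*
 * pm_wake_lock()/pm_wake_unlock() are exposed through the
 * /sys/power/wake_lock and /sys/power/wake_unlock attributes (wired up
 * in kernel/power/main.c, not shown here). A user-space sketch, with
 * the optional timeout in nanoseconds as parsed above:
 */
#if 0	/* user-space example, not kernel code */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void wakelock_example(void)
{
	const char *req = "mylock 5000000000";	/* hold "mylock" <= 5 s */
	int fd = open("/sys/power/wake_lock", O_WRONLY);

	write(fd, req, strlen(req));
	close(fd);

	fd = open("/sys/power/wake_unlock", O_WRONLY);
	write(fd, "mylock", strlen("mylock"));	/* release it early */
	close(fd);
}
#endif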
kernel/printk/braille.c
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/console.h>
#include <linux/string.h>
#include "console_cmdline.h"
#include "braille.h"
char *_braille_console_setup(char **str, char **brl_options)
{
if (!memcmp(*str, "brl,", 4)) {
*brl_options = "";
*str += 4;
} else if (!memcmp(*str, "brl=", 4)) {
*brl_options = *str + 4;
*str = strchr(*brl_options, ',');
if (!*str)
pr_err("need port name after brl=\n");
else
*((*str)++) = 0;
} else
return NULL;
return *str;
}
int
_braille_register_console(struct console *console, struct console_cmdline *c)
{
int rtn = 0;
if (c->brl_options) {
console->flags |= CON_BRL;
rtn = braille_register_console(console, c->index, c->options,
c->brl_options);
}
return rtn;
}
int
_braille_unregister_console(struct console *console)
{
if (console->flags & CON_BRL)
return braille_unregister_console(console);
return 0;
}
kernel/printk/braille.h
#ifndef _PRINTK_BRAILLE_H
#define _PRINTK_BRAILLE_H
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
static inline void
braille_set_options(struct console_cmdline *c, char *brl_options)
{
c->brl_options = brl_options;
}
char *
_braille_console_setup(char **str, char **brl_options);
int
_braille_register_console(struct console *console, struct console_cmdline *c);
int
_braille_unregister_console(struct console *console);
#else
static inline void
braille_set_options(struct console_cmdline *c, char *brl_options)
{
}
static inline char *
_braille_console_setup(char **str, char **brl_options)
{
return NULL;
}
static inline int
_braille_register_console(struct console *console, struct console_cmdline *c)
{
return 0;
}
static inline int
_braille_unregister_console(struct console *console)
{
return 0;
}
#endif
#endif
kernel/printk/console_cmdline.h
#ifndef _CONSOLE_CMDLINE_H
#define _CONSOLE_CMDLINE_H
struct console_cmdline
{
char name[16]; /* Name of the driver */
int index; /* Minor dev. to use */
char *options; /* Options for the driver */
#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
char *brl_options; /* Options for braille driver */
#endif
};
#endif
kernel/printk/Makefile
obj-y = printk.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
kernel/printk/printk.c
/*
* linux/kernel/printk.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* Modified to make sys_syslog() more flexible: added commands to
* return the last 4k of kernel messages, regardless of whether
* they've been read or not. Added option to suppress kernel printk's
* to the console. Added hook for sending the console messages
* elsewhere, in preparation for a serial line console (someday).
* Ted Ts'o, 2/11/93.
* Modified for sysctl support, 1/8/97, Chris Horn.
* Fixed SMP synchronization, 08/08/99, Manfred Spraul
* manfred@colorfullife.com
* Rewrote bits to get rid of console_lock
* 01Mar01 Andrew Morton
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/nmi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h> /* For in_interrupt() */
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/kexec.h>
#include <linux/kdb.h>
#include <linux/ratelimit.h>
#include <linux/kmsg_dump.h>
#include <linux/syslog.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/rculist.h>
#include <linux/poll.h>
#include <linux/irq_work.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <linux/uio.h>
#include <asm/uaccess.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
#include "console_cmdline.h"
#include "braille.h"
int console_printk[4] = {
CONSOLE_LOGLEVEL_DEFAULT, /* console_loglevel */
MESSAGE_LOGLEVEL_DEFAULT, /* default_message_loglevel */
CONSOLE_LOGLEVEL_MIN, /* minimum_console_loglevel */
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
};
/*
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
*/
int oops_in_progress;
EXPORT_SYMBOL(oops_in_progress);
/*
* console_sem protects the console_drivers list, and also
* provides serialisation for access to the entire console
* driver system.
*/
static DEFINE_SEMAPHORE(console_sem);
struct console *console_drivers;
EXPORT_SYMBOL_GPL(console_drivers);
#ifdef CONFIG_LOCKDEP
static struct lockdep_map console_lock_dep_map = {
.name = "console_lock"
};
#endif
/*
* Number of registered extended console drivers.
*
* If extended consoles are present, in-kernel cont reassembly is disabled
* and each fragment is stored as a separate log entry with proper
* continuation flag so that every emitted message has full metadata. This
* doesn't change the result for regular consoles or /proc/kmsg. For
* /dev/kmsg, as long as the reader concatenates messages according to
* consecutive continuation flags, the end result should be the same too.
*/
static int nr_ext_console_drivers;
/*
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
*/
#define down_console_sem() do { \
down(&console_sem);\
mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);\
} while (0)
static int __down_trylock_console_sem(unsigned long ip)
{
if (down_trylock(&console_sem))
return 1;
mutex_acquire(&console_lock_dep_map, 0, 1, ip);
return 0;
}
#define down_trylock_console_sem() __down_trylock_console_sem(_RET_IP_)
#define up_console_sem() do { \
mutex_release(&console_lock_dep_map, 1, _RET_IP_);\
up(&console_sem);\
} while (0)
/*
* This is used for debugging the mess that is the VT code by
* keeping track if we have the console semaphore held. It's
* definitely not the perfect debug tool (we don't know if _WE_
* hold it and are racing, but it helps tracking those weird code
* paths in the console code where we end up in places I want
* locked without the console semaphore held).
*/
static int console_locked, console_suspended;
/*
* If exclusive_console is non-NULL then only this console is to be printed to.
*/
static struct console *exclusive_console;
/*
* Array of consoles built from command line options (console=)
*/
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
static int selected_console = -1;
static int preferred_console = -1;
int console_set_on_cmdline;
EXPORT_SYMBOL(console_set_on_cmdline);
/* Flag: console code may call schedule() */
static int console_may_schedule;
/*
* The printk log buffer consists of a chain of concatenated variable
* length records. Every record starts with a record header, containing
* the overall length of the record.
*
* The heads to the first and last entry in the buffer, as well as the
* sequence numbers of these entries are maintained when messages are
* stored.
*
* If the heads indicate available messages, the length in the header
* tells the start of the next message. A length == 0 for the next message
* indicates a wrap-around to the beginning of the buffer.
*
* Every record carries the monotonic timestamp in microseconds, as well as
* the standard userspace syslog level and syslog facility. The usual
* kernel messages use LOG_KERN; userspace-injected messages always carry
* a matching syslog facility, by default LOG_USER. The origin of every
* message can be reliably determined that way.
*
* The human readable log message directly follows the message header. The
* length of the message text is stored in the header; the stored message
* is not terminated.
*
* Optionally, a message can carry a dictionary of properties (key/value pairs),
* to provide userspace with a machine-readable message context.
*
* Examples for well-defined, commonly used property names are:
* DEVICE=b12:8 device identifier
* b12:8 block dev_t
* c127:3 char dev_t
* n8 netdev ifindex
* +sound:card0 subsystem:devname
* SUBSYSTEM=pci driver-core subsystem name
*
* Valid characters in property names are [a-zA-Z0-9.-_]. The plain text value
* follows directly after a '=' character. Every property is terminated by
* a '\0' character. The last property is not terminated.
*
* Example of a message structure:
* 0000 ff 8f 00 00 00 00 00 00 monotonic time in nsec
* 0008 34 00 record is 52 bytes long
* 000a 0b 00 text is 11 bytes long
* 000c 1f 00 dictionary is 23 bytes long
* 000e 03 00 LOG_KERN (facility) LOG_ERR (level)
* 0010 69 74 27 73 20 61 20 6c "it's a l"
* 69 6e 65 "ine"
* 001b 44 45 56 49 43 "DEVIC"
* 45 3d 62 38 3a 32 00 44 "E=b8:2\0D"
* 52 49 56 45 52 3d 62 75 "RIVER=bu"
* 67 "g"
* 0032 00 00 00 padding to next message header
*
* The 'struct printk_log' buffer header must never be directly exported to
* userspace, it is a kernel-private implementation detail that might
* need to be changed in the future, when the requirements change.
*
* /dev/kmsg exports the structured data in the following line format:
* "<level>,<sequnum>,<timestamp>,<contflag>[,additional_values, ... ];<message text>\n"
*
* Users of the export format should ignore possible additional values
* separated by ',', and find the message after the ';' character.
*
* The optional key/value pairs are attached as continuation lines starting
* with a space character and terminated by a newline. All possible
* non-printable characters are escaped in the "\xff" notation.
*/
enum log_flags {
LOG_NOCONS = 1, /* already flushed, do not print to console */
LOG_NEWLINE = 2, /* text ended with a newline */
LOG_PREFIX = 4, /* text started with a prefix */
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
struct printk_log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
u16 dict_len; /* length of dictionary buffer */
u8 facility; /* syslog facility */
u8 flags:5; /* internal record flags */
u8 level:3; /* syslog level */
};
/*
* The logbuf_lock protects kmsg buffer, indices, counters. This can be taken
* within the scheduler's rq lock. It must be released before calling
* console_unlock() or anything else that might wake up a process.
*/
static DEFINE_RAW_SPINLOCK(logbuf_lock);
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
static u64 syslog_seq;
static u32 syslog_idx;
static enum log_flags syslog_prev;
static size_t syslog_partial;
/* index and sequence number of the first record stored in the buffer */
static u64 log_first_seq;
static u32 log_first_idx;
/* index and sequence number of the next record to store in the buffer */
static u64 log_next_seq;
static u32 log_next_idx;
/* the next printk record to write to the console */
static u64 console_seq;
static u32 console_idx;
static enum log_flags console_prev;
/* the next printk record to read after the last 'clear' command */
static u64 clear_seq;
static u32 clear_idx;
#define PREFIX_MAX 32
#define LOG_LINE_MAX (1024 - PREFIX_MAX)
/* record buffer */
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define LOG_ALIGN 4
#else
#define LOG_ALIGN __alignof__(struct printk_log)
#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
/* Return log buffer address */
char *log_buf_addr_get(void)
{
return log_buf;
}
/* Return log buffer size */
u32 log_buf_len_get(void)
{
return log_buf_len;
}
/* human readable text of the record */
static char *log_text(const struct printk_log *msg)
{
return (char *)msg + sizeof(struct printk_log);
}
/* optional key/value pair dictionary attached to the record */
static char *log_dict(const struct printk_log *msg)
{
return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}
/* get record by index; idx must point to valid msg */
static struct printk_log *log_from_idx(u32 idx)
{
struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer.
*/
if (!msg->len)
return (struct printk_log *)log_buf;
return msg;
}
/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer as *this* one, and
* return the one after that.
*/
if (!msg->len) {
msg = (struct printk_log *)log_buf;
return msg->len;
}
return idx + msg->len;
}
/*
* Check whether there is enough free space for the given message.
*
* The same values of first_idx and next_idx mean that the buffer
* is either empty or full.
*
* If the buffer is empty, we must respect the position of the indexes.
* They cannot be reset to the beginning of the buffer.
*/
static int logbuf_has_space(u32 msg_size, bool empty)
{
u32 free;
if (log_next_idx > log_first_idx || empty)
free = max(log_buf_len - log_next_idx, log_first_idx);
else
free = log_first_idx - log_next_idx;
/*
* We also need space for an empty header that signals wrapping
* of the buffer.
*/
return free >= msg_size + sizeof(struct printk_log);
}
static int log_make_free_space(u32 msg_size)
{
while (log_first_seq < log_next_seq) {
if (logbuf_has_space(msg_size, false))
return 0;
/* drop old messages until we have enough contiguous space */
log_first_idx = log_next(log_first_idx);
log_first_seq++;
}
/* sequence numbers are equal, so the log buffer is empty */
if (logbuf_has_space(msg_size, true))
return 0;
return -ENOMEM;
}
/* compute the message size including the padding bytes */
static u32 msg_used_size(u16 text_len, u16 dict_len, u32 *pad_len)
{
u32 size;
size = sizeof(struct printk_log) + text_len + dict_len;
*pad_len = (-size) & (LOG_ALIGN - 1);
size += *pad_len;
return size;
}
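/*
 * Worked example: with LOG_ALIGN == 4 and
 * sizeof(struct printk_log) + text_len + dict_len == 45,
 * (-45) & (4 - 1) == 3, so *pad_len == 3 and the total size becomes
 * 48, keeping the next record header LOG_ALIGN-aligned.
 */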
/*
* Define how much of the log buffer we could take at maximum. The value
* must be greater than two. Note that only half of the buffer is available
* when the index points to the middle.
*/
#define MAX_LOG_TAKE_PART 4
static const char trunc_msg[] = "<truncated>";
static u32 truncate_msg(u16 *text_len, u16 *trunc_msg_len,
u16 *dict_len, u32 *pad_len)
{
/*
* The message should not take the whole buffer. Otherwise, it might
* get removed too soon.
*/
u32 max_text_len = log_buf_len / MAX_LOG_TAKE_PART;
if (*text_len > max_text_len)
*text_len = max_text_len;
/* enable the warning message */
*trunc_msg_len = strlen(trunc_msg);
/* disable the "dict" completely */
*dict_len = 0;
/* compute the size again, count also the warning message */
return msg_used_size(*text_len + *trunc_msg_len, 0, pad_len);
}
/* insert record into the buffer, discard old ones, update heads */
static int log_store(int facility, int level,
enum log_flags flags, u64 ts_nsec,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
struct printk_log *msg;
u32 size, pad_len;
u16 trunc_msg_len = 0;
/* number of '\0' padding bytes to next message */
size = msg_used_size(text_len, dict_len, &pad_len);
if (log_make_free_space(size)) {
/* truncate the message if it is too long for empty buffer */
size = truncate_msg(&text_len, &trunc_msg_len,
&dict_len, &pad_len);
/* survive when the log buffer is too small for trunc_msg */
if (log_make_free_space(size))
return 0;
}
if (log_next_idx + size + sizeof(struct printk_log) > log_buf_len) {
/*
* This message + an additional empty header does not fit
* at the end of the buffer. Add an empty header with len == 0
* to signify a wrap around.
*/
memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
log_next_idx = 0;
}
/* fill message */
msg = (struct printk_log *)(log_buf + log_next_idx);
memcpy(log_text(msg), text, text_len);
msg->text_len = text_len;
if (trunc_msg_len) {
memcpy(log_text(msg) + text_len, trunc_msg, trunc_msg_len);
msg->text_len += trunc_msg_len;
}
memcpy(log_dict(msg), dict, dict_len);
msg->dict_len = dict_len;
msg->facility = facility;
msg->level = level & 7;
msg->flags = flags & 0x1f;
if (ts_nsec > 0)
msg->ts_nsec = ts_nsec;
else
msg->ts_nsec = local_clock();
memset(log_dict(msg) + dict_len, 0, pad_len);
msg->len = size;
/* insert message */
log_next_idx += msg->len;
log_next_seq++;
return msg->text_len;
}
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
static int syslog_action_restricted(int type)
{
if (dmesg_restrict)
return 1;
/*
* Unless restricted, we allow "read all" and "get buffer size"
* for everybody.
*/
return type != SYSLOG_ACTION_READ_ALL &&
type != SYSLOG_ACTION_SIZE_BUFFER;
}
int check_syslog_permissions(int type, int source)
{
/*
* If this is from /proc/kmsg and we've already opened it, then we've
* already done the capabilities checks at open time.
*/
if (source == SYSLOG_FROM_PROC && type != SYSLOG_ACTION_OPEN)
goto ok;
if (syslog_action_restricted(type)) {
if (capable(CAP_SYSLOG))
goto ok;
/*
* For historical reasons, accept CAP_SYS_ADMIN too, with
* a warning.
*/
if (capable(CAP_SYS_ADMIN)) {
pr_warn_once("%s (%d): Attempt to access syslog with "
"CAP_SYS_ADMIN but no CAP_SYSLOG "
"(deprecated).\n",
current->comm, task_pid_nr(current));
goto ok;
}
return -EPERM;
}
ok:
return security_syslog(type);
}
static void append_char(char **pp, char *e, char c)
{
if (*pp < e)
*(*pp)++ = c;
}
static ssize_t msg_print_ext_header(char *buf, size_t size,
struct printk_log *msg, u64 seq,
enum log_flags prev_flags)
{
u64 ts_usec = msg->ts_nsec;
char cont = '-';
do_div(ts_usec, 1000);
/*
* If we couldn't merge continuation line fragments during the print,
* export the stored flags to allow an optional external merge of the
* records. Merging the records isn't necessarily correct in all cases,
* e.g. when we hit a race during printing. In most cases though, it
* produces more readable output. 'c' in the record flags marks the first
* fragment of a line, '+' the following.
*/
if (msg->flags & LOG_CONT && !(prev_flags & LOG_CONT))
cont = 'c';
else if ((msg->flags & LOG_CONT) ||
((prev_flags & LOG_CONT) && !(msg->flags & LOG_PREFIX)))
cont = '+';
return scnprintf(buf, size, "%u,%llu,%llu,%c;",
(msg->facility << 3) | msg->level, seq, ts_usec, cont);
}
static ssize_t msg_print_ext_body(char *buf, size_t size,
char *dict, size_t dict_len,
char *text, size_t text_len)
{
char *p = buf, *e = buf + size;
size_t i;
/* escape non-printable characters */
for (i = 0; i < text_len; i++) {
unsigned char c = text[i];
if (c < ' ' || c >= 127 || c == '\\')
p += scnprintf(p, e - p, "\\x%02x", c);
else
append_char(&p, e, c);
}
append_char(&p, e, '\n');
if (dict_len) {
bool line = true;
for (i = 0; i < dict_len; i++) {
unsigned char c = dict[i];
if (line) {
append_char(&p, e, ' ');
line = false;
}
if (c == '\0') {
append_char(&p, e, '\n');
line = true;
continue;
}
if (c < ' ' || c >= 127 || c == '\\') {
p += scnprintf(p, e - p, "\\x%02x", c);
continue;
}
append_char(&p, e, c);
}
append_char(&p, e, '\n');
}
return p - buf;
}
/* /dev/kmsg - userspace message inject/listen interface */
struct devkmsg_user {
u64 seq;
u32 idx;
enum log_flags prev;
struct mutex lock;
char buf[CONSOLE_EXT_LOG_MAX];
};
static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
{
char *buf, *line;
int i;
int level = default_message_loglevel;
int facility = 1; /* LOG_USER */
size_t len = iov_iter_count(from);
ssize_t ret = len;
if (len > LOG_LINE_MAX)
return -EINVAL;
buf = kmalloc(len+1, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
buf[len] = '\0';
if (copy_from_iter(buf, len, from) != len) {
kfree(buf);
return -EFAULT;
}
/*
* Extract and skip the syslog prefix <[0-9]*>. Coming from userspace
* the decimal value represents a 32-bit quantity; the lower 3 bits are
* the log level, the rest is the log facility.
*
* If no prefix or no userspace facility is specified, we
* enforce LOG_USER, to be able to reliably distinguish
* kernel-generated messages from userspace-injected ones.
*/
line = buf;
if (line[0] == '<') {
char *endp = NULL;
i = simple_strtoul(line+1, &endp, 10);
if (endp && endp[0] == '>') {
level = i & 7;
if (i >> 3)
facility = i >> 3;
endp++;
len -= endp - line;
line = endp;
}
}
printk_emit(facility, level, NULL, 0, "%s", line);
kfree(buf);
return ret;
}
static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
struct printk_log *msg;
size_t len;
ssize_t ret;
if (!user)
return -EBADF;
ret = mutex_lock_interruptible(&user->lock);
if (ret)
return ret;
raw_spin_lock_irq(&logbuf_lock);
while (user->seq == log_next_seq) {
if (file->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
raw_spin_unlock_irq(&logbuf_lock);
goto out;
}
raw_spin_unlock_irq(&logbuf_lock);
ret = wait_event_interruptible(log_wait,
user->seq != log_next_seq);
if (ret)
goto out;
raw_spin_lock_irq(&logbuf_lock);
}
if (user->seq < log_first_seq) {
/* our last seen message is gone, return error and reset */
user->idx = log_first_idx;
user->seq = log_first_seq;
ret = -EPIPE;
raw_spin_unlock_irq(&logbuf_lock);
goto out;
}
msg = log_from_idx(user->idx);
len = msg_print_ext_header(user->buf, sizeof(user->buf),
msg, user->seq, user->prev);
len += msg_print_ext_body(user->buf + len, sizeof(user->buf) - len,
log_dict(msg), msg->dict_len,
log_text(msg), msg->text_len);
user->prev = msg->flags;
user->idx = log_next(user->idx);
user->seq++;
raw_spin_unlock_irq(&logbuf_lock);
if (len > count) {
ret = -EINVAL;
goto out;
}
if (copy_to_user(buf, user->buf, len)) {
ret = -EFAULT;
goto out;
}
ret = len;
out:
mutex_unlock(&user->lock);
return ret;
}
static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
{
struct devkmsg_user *user = file->private_data;
loff_t ret = 0;
if (!user)
return -EBADF;
if (offset)
return -ESPIPE;
raw_spin_lock_irq(&logbuf_lock);
switch (whence) {
case SEEK_SET:
/* the first record */
user->idx = log_first_idx;
user->seq = log_first_seq;
break;
case SEEK_DATA:
/*
* The first record after the last SYSLOG_ACTION_CLEAR,
* like issued by 'dmesg -c'. Reading /dev/kmsg itself
* changes no global state, and does not clear anything.
*/
user->idx = clear_idx;
user->seq = clear_seq;
break;
case SEEK_END:
/* after the last record */
user->idx = log_next_idx;
user->seq = log_next_seq;
break;
default:
ret = -EINVAL;
}
raw_spin_unlock_irq(&logbuf_lock);
return ret;
}
static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
{
struct devkmsg_user *user = file->private_data;
int ret = 0;
if (!user)
return POLLERR|POLLNVAL;
poll_wait(file, &log_wait, wait);
raw_spin_lock_irq(&logbuf_lock);
if (user->seq < log_next_seq) {
/* return error when data has vanished underneath us */
if (user->seq < log_first_seq)
ret = POLLIN|POLLRDNORM|POLLERR|POLLPRI;
else
ret = POLLIN|POLLRDNORM;
}
raw_spin_unlock_irq(&logbuf_lock);
return ret;
}
static int devkmsg_open(struct inode *inode, struct file *file)
{
struct devkmsg_user *user;
int err;
/* write-only does not need any file context */
if ((file->f_flags & O_ACCMODE) == O_WRONLY)
return 0;
err = check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
SYSLOG_FROM_READER);
if (err)
return err;
user = kmalloc(sizeof(struct devkmsg_user), GFP_KERNEL);
if (!user)
return -ENOMEM;
mutex_init(&user->lock);
raw_spin_lock_irq(&logbuf_lock);
user->idx = log_first_idx;
user->seq = log_first_seq;
raw_spin_unlock_irq(&logbuf_lock);
file->private_data = user;
return 0;
}
static int devkmsg_release(struct inode *inode, struct file *file)
{
struct devkmsg_user *user = file->private_data;
if (!user)
return 0;
mutex_destroy(&user->lock);
kfree(user);
return 0;
}
const struct file_operations kmsg_fops = {
.open = devkmsg_open,
.read = devkmsg_read,
.write_iter = devkmsg_write,
.llseek = devkmsg_llseek,
.poll = devkmsg_poll,
.release = devkmsg_release,
};
#ifdef CONFIG_KEXEC
/*
* This appends the listed symbols to /proc/vmcore
*
* /proc/vmcore is used by various utilities, like crash and makedumpfile to
* obtain access to symbols that are otherwise very difficult to locate. These
* symbols are specifically used so that utilities can access and extract the
* dmesg log from a vmcore file after a crash.
*/
void log_buf_kexec_setup(void)
{
VMCOREINFO_SYMBOL(log_buf);
VMCOREINFO_SYMBOL(log_buf_len);
VMCOREINFO_SYMBOL(log_first_idx);
VMCOREINFO_SYMBOL(log_next_idx);
/*
* Export struct printk_log size and field offsets. User space tools can
* parse it and detect any changes to the structure down the line.
*/
VMCOREINFO_STRUCT_SIZE(printk_log);
VMCOREINFO_OFFSET(printk_log, ts_nsec);
VMCOREINFO_OFFSET(printk_log, len);
VMCOREINFO_OFFSET(printk_log, text_len);
VMCOREINFO_OFFSET(printk_log, dict_len);
}
#endif
/* requested log_buf_len from kernel cmdline */
static unsigned long __initdata new_log_buf_len;
/* the ring buffer is always sized in powers of 2 */
static void __init log_buf_len_update(unsigned size)
{
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len)
new_log_buf_len = size;
}
/* save requested log_buf_len since it's too early to process it */
static int __init log_buf_len_setup(char *str)
{
unsigned size = memparse(str, &str);
log_buf_len_update(size);
return 0;
}
early_param("log_buf_len", log_buf_len_setup);
#ifdef CONFIG_SMP
#define __LOG_CPU_MAX_BUF_LEN (1 << CONFIG_LOG_CPU_MAX_BUF_SHIFT)
static void __init log_buf_add_cpu(void)
{
unsigned int cpu_extra;
/*
* archs should set up cpu_possible_bits properly with
* set_cpu_possible() after setup_arch(), but just in
* case let's ensure this is valid.
*/
if (num_possible_cpus() == 1)
return;
cpu_extra = (num_possible_cpus() - 1) * __LOG_CPU_MAX_BUF_LEN;
/* with default config values, only systems with > 64 CPUs get past this check */
if (cpu_extra <= __LOG_BUF_LEN / 2)
return;
pr_info("log_buf_len individual max cpu contribution: %d bytes\n",
__LOG_CPU_MAX_BUF_LEN);
pr_info("log_buf_len total cpu_extra contributions: %d bytes\n",
cpu_extra);
pr_info("log_buf_len min size: %d bytes\n", __LOG_BUF_LEN);
log_buf_len_update(cpu_extra + __LOG_BUF_LEN);
}
#else /* !CONFIG_SMP */
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
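/*
 * Worked example of log_buf_add_cpu()'s sizing, assuming a 512 KiB
 * base buffer (CONFIG_LOG_BUF_SHIFT=19) and a 4 KiB per-CPU share
 * (CONFIG_LOG_CPU_MAX_BUF_SHIFT=12); both values are illustrative
 * assumptions. cpu_extra only exceeds half the base buffer, and thus
 * triggers a resize, roughly once the machine has more than 64
 * possible CPUs:
 */
#if 0 /* userspace example */
#include <stdio.h>

int main(void)
{
	unsigned int log_buf_len = 1 << 19;	/* assumed base size */
	unsigned int per_cpu = 1 << 12;		/* assumed per-CPU share */
	unsigned int cpus;

	for (cpus = 2; cpus <= 128; cpus *= 2) {
		unsigned int cpu_extra = (cpus - 1) * per_cpu;

		printf("%3u CPUs: cpu_extra=%7u -> %s\n", cpus, cpu_extra,
		       cpu_extra <= log_buf_len / 2 ? "keep" : "grow");
	}
	return 0;
}
#endif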
void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
int free;
if (log_buf != __log_buf)
return;
if (!early && !new_log_buf_len)
log_buf_add_cpu();
if (!new_log_buf_len)
return;
if (early) {
new_log_buf =
memblock_virt_alloc(new_log_buf_len, LOG_ALIGN);
} else {
new_log_buf = memblock_virt_alloc_nopanic(new_log_buf_len,
LOG_ALIGN);
}
if (unlikely(!new_log_buf)) {
pr_err("log_buf_len: %ld bytes not available\n",
new_log_buf_len);
return;
}
raw_spin_lock_irqsave(&logbuf_lock, flags);
log_buf_len = new_log_buf_len;
log_buf = new_log_buf;
new_log_buf_len = 0;
free = __LOG_BUF_LEN - log_next_idx;
memcpy(log_buf, __log_buf, __LOG_BUF_LEN);
raw_spin_unlock_irqrestore(&logbuf_lock, flags);
pr_info("log_buf_len: %d bytes\n", log_buf_len);
pr_info("early log buf free: %d(%d%%)\n",
free, (free * 100) / __LOG_BUF_LEN);
}
static bool __read_mostly ignore_loglevel;
static int __init ignore_loglevel_setup(char *str)
{
ignore_loglevel = true;
pr_info("debug: ignoring loglevel setting.\n");
return 0;
}
early_param("ignore_loglevel", ignore_loglevel_setup);
module_param(ignore_loglevel, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ignore_loglevel,
"ignore loglevel setting (prints all kernel messages to the console)");
#ifdef CONFIG_BOOT_PRINTK_DELAY
static int boot_delay; /* msecs delay after each printk during bootup */
static unsigned long long loops_per_msec; /* based on boot_delay */
static int __init boot_delay_setup(char *str)
{
unsigned long lpj;
lpj = preset_lpj ? preset_lpj : 1000000; /* some guess */
loops_per_msec = (unsigned long long)lpj / 1000 * HZ;
get_option(&str, &boot_delay);
if (boot_delay > 10 * 1000)
boot_delay = 0;
pr_debug("boot_delay: %u, preset_lpj: %ld, lpj: %lu, "
"HZ: %d, loops_per_msec: %llu\n",
boot_delay, preset_lpj, lpj, HZ, loops_per_msec);
return 0;
}
early_param("boot_delay", boot_delay_setup);
static void boot_delay_msec(int level)
{
unsigned long long k;
unsigned long timeout;
if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
|| (level >= console_loglevel && !ignore_loglevel)) {
return;
}
k = (unsigned long long)loops_per_msec * boot_delay;
timeout = jiffies + msecs_to_jiffies(boot_delay);
while (k) {
k--;
cpu_relax();
/*
* use (volatile) jiffies to prevent the compiler from
* optimizing the loop away; loop termination via jiffies
* is secondary and may or may not happen.
*/
if (time_after(jiffies, timeout))
break;
touch_nmi_watchdog();
}
}
#else
static inline void boot_delay_msec(int level)
{
}
#endif
static bool printk_time = IS_ENABLED(CONFIG_PRINTK_TIME);
module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
static size_t print_time(u64 ts, char *buf)
{
unsigned long rem_nsec;
if (!printk_time)
return 0;
rem_nsec = do_div(ts, 1000000000);
if (!buf)
return snprintf(NULL, 0, "[%5lu.000000] ", (unsigned long)ts);
return sprintf(buf, "[%5lu.%06lu] ",
(unsigned long)ts, rem_nsec / 1000);
}
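/*
 * print_time() splits a raw nanosecond timestamp into seconds and
 * microseconds, producing the familiar "[   12.345678] " dmesg prefix;
 * do_div() divides ts in place and returns the remainder. The same
 * arithmetic in standalone, illustrative form:
 */
#if 0 /* userspace example */
#include <stdio.h>

int main(void)
{
	unsigned long long ts = 12345678901ULL;		/* nanoseconds */
	unsigned long rem_nsec = ts % 1000000000;

	ts /= 1000000000;
	printf("[%5lu.%06lu] \n", (unsigned long)ts, rem_nsec / 1000);
	/* prints "[   12.345678] " */
	return 0;
}
#endif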
static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
size_t len = 0;
unsigned int prefix = (msg->facility << 3) | msg->level;
if (syslog) {
if (buf) {
len += sprintf(buf, "<%u>", prefix);
} else {
len += 3;
if (prefix > 999)
len += 3;
else if (prefix > 99)
len += 2;
else if (prefix > 9)
len++;
}
}
len += print_time(msg->ts_nsec, buf ? buf + len : NULL);
return len;
}
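/*
 * The prefix computed above packs facility and level into one value,
 * prefix = (facility << 3) | level, mirroring the "i & 7" / "i >> 3"
 * decoding in devkmsg_write(). E.g. facility 0 (kernel) at level 4
 * yields "<4>"; facility 1 (user) at level 6 yields "<14>". A
 * standalone round-trip check:
 */
#if 0 /* userspace example */
#include <stdio.h>

int main(void)
{
	unsigned int facility = 1, level = 6;
	unsigned int prefix = (facility << 3) | level;

	printf("<%u>\n", prefix);			/* <14> */
	printf("facility=%u level=%u\n",		/* 1 and 6 again */
	       prefix >> 3, prefix & 7);
	return 0;
}
#endif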
static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
size_t text_size = msg->text_len;
bool prefix = true;
bool newline = true;
size_t len = 0;
if ((prev & LOG_CONT) && !(msg->flags & LOG_PREFIX))
prefix = false;
if (msg->flags & LOG_CONT) {
if ((prev & LOG_CONT) && !(prev & LOG_NEWLINE))
prefix = false;
if (!(msg->flags & LOG_NEWLINE))
newline = false;
}
do {
const char *next = memchr(text, '\n', text_size);
size_t text_len;
if (next) {
text_len = next - text;
next++;
text_size -= next - text;
} else {
text_len = text_size;
}
if (buf) {
if (print_prefix(msg, syslog, NULL) +
text_len + 1 >= size - len)
break;
if (prefix)
len += print_prefix(msg, syslog, buf + len);
memcpy(buf + len, text, text_len);
len += text_len;
if (next || newline)
buf[len++] = '\n';
} else {
/* size-only pass (no output buffer), used for SYSLOG_ACTION_* size queries */
if (prefix)
len += print_prefix(msg, syslog, NULL);
len += text_len;
if (next || newline)
len++;
}
prefix = true;
text = next;
} while (text);
return len;
}
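/*
 * msg_print_text() emits one (optionally prefixed) line per embedded
 * '\n', so a single multi-line record expands into several console
 * lines. A standalone, illustrative sketch of the same memchr()-driven
 * split:
 */
#if 0 /* userspace example */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *text = "first\nsecond\nthird";
	size_t text_size = strlen(text);

	do {
		const char *next = memchr(text, '\n', text_size);
		size_t text_len = next ? (size_t)(next - text) : text_size;

		if (next) {
			next++;
			text_size -= next - text;
		}
		printf("[prefix] %.*s\n", (int)text_len, text);
		text = next;
	} while (text);
	return 0;
}
#endif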
static int syslog_print(char __user *buf, int size)
{
char *text;
struct printk_log *msg;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
while (size > 0) {
size_t n;
size_t skip;
raw_spin_lock_irq(&logbuf_lock);
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
syslog_idx = log_first_idx;
syslog_prev = 0;
syslog_partial = 0;
}
if (syslog_seq == log_next_seq) {
raw_spin_unlock_irq(&logbuf_lock);
break;
}
skip = syslog_partial;
msg = log_from_idx(syslog_idx);
n = msg_print_text(msg, syslog_prev, true, text,
LOG_LINE_MAX + PREFIX_MAX);
if (n - syslog_partial <= size) {
/* message fits into buffer, move forward */
syslog_idx = log_next(syslog_idx);
syslog_seq++;
syslog_prev = msg->flags;
n -= syslog_partial;
syslog_partial = 0;
} else if (!len) {
/* partial read(), remember position */
n = size;
syslog_partial += n;
} else
n = 0;
raw_spin_unlock_irq(&logbuf_lock);
if (!n)
break;
if (copy_to_user(buf, text + skip, n)) {
if (!len)
len = -EFAULT;
break;
}
len += n;
size -= n;
buf += n;
}
kfree(text);
return len;
}
static int syslog_print_all(char __user *buf, int size, bool clear)
{
char *text;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
return -ENOMEM;
raw_spin_lock_irq(&logbuf_lock);
if (buf) {
u64 next_seq;
u64 seq;
u32 idx;
enum log_flags prev;
if (clear_seq < log_first_seq) {
/* messages are gone, move to first available one */
clear_seq = log_first_seq;
clear_idx = log_first_idx;
}
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
*/
seq = clear_seq;
idx = clear_idx;
prev = 0;
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
idx = log_next(idx);
seq++;
}
/* move first record forward until length fits into the buffer */
seq = clear_seq;
idx = clear_idx;
prev = 0;
while (len > size && seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
idx = log_next(idx);
seq++;
}
/* last message fitting into this dump */
next_seq = log_next_seq;
len = 0;
while (len >= 0 && seq < next_seq) {
struct printk_log *msg = log_from_idx(idx);
int textlen;
textlen = msg_print_text(msg, prev, true, text,
LOG_LINE_MAX + PREFIX_MAX);
if (textlen < 0) {
len = textlen;
break;
}
idx = log_next(idx);
seq++;
prev = msg->flags;
raw_spin_unlock_irq(&logbuf_lock);
if (copy_to_user(buf + len, text, textlen))
len = -EFAULT;
else
len += textlen;
raw_spin_lock_irq(&logbuf_lock);
if (seq < log_first_seq) {
/* messages are gone, move to next one */
seq = log_first_seq;
idx = log_first_idx;
prev = 0;
}
}
}
if (clear) {
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
raw_spin_unlock_irq(&logbuf_lock);
kfree(text);
return len;
}
int do_syslog(int type, char __user *buf, int len, int source)
{
bool clear = false;
static int saved_console_loglevel = LOGLEVEL_DEFAULT;
int error;
error = check_syslog_permissions(type, source);
if (error)
goto out;
switch (type) {
case SYSLOG_ACTION_CLOSE: /* Close log */
break;
case SYSLOG_ACTION_OPEN: /* Open log */
break;
case SYSLOG_ACTION_READ: /* Read from log */
error = -EINVAL;
if (!buf || len < 0)
goto out;
error = 0;
if (!len)
goto out;
if (!access_ok(VERIFY_WRITE, buf, len)) {
error = -EFAULT;
goto out;
}
error = wait_event_interruptible(log_wait,
syslog_seq != log_next_seq);
if (error)
goto out;
error = syslog_print(buf, len);
break;
/* Read/clear last kernel messages */
case SYSLOG_ACTION_READ_CLEAR:
clear = true;
/* FALL THRU */
/* Read last kernel messages */
case SYSLOG_ACTION_READ_ALL:
error = -EINVAL;
if (!buf || len < 0)
goto out;
error = 0;
if (!len)
goto out;
if (!access_ok(VERIFY_WRITE, buf, len)) {
error = -EFAULT;
goto out;
}
error = syslog_print_all(buf, len, clear);
break;
/* Clear ring buffer */
case SYSLOG_ACTION_CLEAR:
syslog_print_all(NULL, 0, true);
break;
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
if (saved_console_loglevel == LOGLEVEL_DEFAULT)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
/* Enable logging to console */
case SYSLOG_ACTION_CONSOLE_ON:
if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
console_loglevel = saved_console_loglevel;
saved_console_loglevel = LOGLEVEL_DEFAULT;
}
break;
/* Set level of messages printed to console */
case SYSLOG_ACTION_CONSOLE_LEVEL:
error = -EINVAL;
if (len < 1 || len > 8)
goto out;
if (len < minimum_console_loglevel)
len = minimum_console_loglevel;
console_loglevel = len;
/* Implicitly re-enable logging to console */
saved_console_loglevel = LOGLEVEL_DEFAULT;
error = 0;
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
raw_spin_lock_irq(&logbuf_lock);
if (syslog_seq < log_first_seq) {
/* messages are gone, move to first one */
syslog_seq = log_first_seq;
syslog_idx = log_first_idx;
syslog_prev = 0;
syslog_partial = 0;
}
if (source == SYSLOG_FROM_PROC) {
/*
* Short-cut for poll() on /proc/kmsg, which simply checks
* for pending data, not the size; return the count of
* records, not the length.
*/
error = log_next_seq - syslog_seq;
} else {
u64 seq = syslog_seq;
u32 idx = syslog_idx;
enum log_flags prev = syslog_prev;
error = 0;
while (seq < log_next_seq) {
struct printk_log *msg = log_from_idx(idx);
error += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
seq++;
prev = msg->flags;
}
error -= syslog_partial;
}
raw_spin_unlock_irq(&logbuf_lock);
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
error = log_buf_len;
break;
default:
error = -EINVAL;
break;
}
out:
return error;
}
SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
{
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
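/*
 * Userspace reaches do_syslog() either through glibc's klogctl() or
 * the raw syslog(2) syscall. An illustrative size query followed by a
 * full dump; the action numbers follow include/linux/syslog.h
 * (3 == SYSLOG_ACTION_READ_ALL, 10 == SYSLOG_ACTION_SIZE_BUFFER):
 */
#if 0 /* userspace example */
#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int size = klogctl(10, NULL, 0);	/* SYSLOG_ACTION_SIZE_BUFFER */
	char *buf;
	int len;

	if (size < 0)
		return 1;
	buf = malloc(size);
	if (!buf)
		return 1;
	len = klogctl(3, buf, size);		/* SYSLOG_ACTION_READ_ALL */
	if (len > 0)
		fwrite(buf, 1, len, stdout);
	free(buf);
	return 0;
}
#endif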
/*
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
* The console_lock must be held.
*/
static void call_console_drivers(int level,
const char *ext_text, size_t ext_len,
const char *text, size_t len)
{
struct console *con;
trace_console(text, len);
if (level >= console_loglevel && !ignore_loglevel)
return;
if (!console_drivers)
return;
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
if (!(con->flags & CON_ENABLED))
continue;
if (!con->write)
continue;
if (!cpu_online(smp_processor_id()) &&
!(con->flags & CON_ANYTIME))
continue;
if (con->flags & CON_EXTENDED)
con->write(con, ext_text, ext_len);
else
con->write(con, text, len);
}
}
/*
* Zap console related locks when oopsing.
* To leave time for slow consoles to print a full oops,
* only zap at most once every 30 seconds.
*/
static void zap_locks(void)
{
static unsigned long oops_timestamp;
if (time_after_eq(jiffies, oops_timestamp) &&
!time_after(jiffies, oops_timestamp + 30 * HZ))
return;
oops_timestamp = jiffies;
debug_locks_off();
/* If a crash is occurring, make sure we can't deadlock */
raw_spin_lock_init(&logbuf_lock);
/* And make sure that we print immediately */
sema_init(&console_sem, 1);
}
/*
* Check if we have any console that is capable of printing while cpu is
* booting or shutting down. Requires console_sem.
*/
static int have_callable_console(void)
{
struct console *con;
for_each_console(con)
if (con->flags & CON_ANYTIME)
return 1;
return 0;
}
/*
* Can we actually use the console at this time on this cpu?
*
* Console drivers may assume that per-cpu resources have been allocated. So
* unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
* call them until this CPU is officially up.
*/
static inline int can_use_console(unsigned int cpu)
{
return cpu_online(cpu) || have_callable_console();
}
/*
* Try to get console ownership to actually show the kernel
* messages from a 'printk'. Return true (and with the
* console_lock held, and 'console_locked' set) if it
* is successful, false otherwise.
*/
static int console_trylock_for_printk(void)
{
unsigned int cpu = smp_processor_id();
if (!console_trylock())
return 0;
/*
* If we can't use the console, we need to release the console
* semaphore by hand to avoid flushing the buffer. We need to hold the
* console semaphore in order to do this test safely.
*/
if (!can_use_console(cpu)) {
console_locked = 0;
up_console_sem();
return 0;
}
return 1;
}
int printk_delay_msec __read_mostly;
static inline void printk_delay(void)
{
if (unlikely(printk_delay_msec)) {
int m = printk_delay_msec;
while (m--) {
mdelay(1);
touch_nmi_watchdog();
}
}
}
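/*
 * printk_delay_msec is exposed as the kernel.printk_delay sysctl, so
 * the per-message busy-wait above can be tuned at runtime. An
 * illustrative way to set a 10 ms delay (userspace, requires root):
 */
#if 0 /* userspace example */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/printk_delay", "w");

	if (!f)
		return 1;
	fprintf(f, "10\n");	/* delay each console message by 10 ms */
	fclose(f);
	return 0;
}
#endif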