 * requests on the queue, it is responsible for arranging that the requests
 * get dealt with eventually.
 *
 * The queue spin lock must be held while manipulating the requests on the
 * request queue; this lock is also taken from interrupt context, so irq
 * disabling is needed for it.
 *
 * Function returns a pointer to the initialized request queue, or NULL if
 * it didn't succeed.
 *
 * Note:
 *    blk_init_queue() must be paired with a blk_cleanup_queue() call
 *    when the block device is deactivated (such as at module unload).
 **/

request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
{
	return blk_init_queue_node(rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_queue);
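
/*
 * Illustrative sketch, not part of the original file: the typical
 * driver-side pairing of blk_init_queue() at init time with
 * blk_cleanup_queue() at module unload, as required by the note above.
 * All example_* names are hypothetical, and the block stays disabled
 * under #if 0.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static request_queue_t *example_queue;

/* hypothetical request handler; invoked with example_lock held */
static void example_request_fn(request_queue_t *q)
{
	struct request *rq;

	/* complete each request; a real driver would program hardware here */
	while ((rq = elv_next_request(q)) != NULL)
		end_request(rq, 1);
}

static int __init example_init(void)
{
	example_queue = blk_init_queue(example_request_fn, &example_lock);
	if (!example_queue)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	/* the required counterpart when the device goes away */
	blk_cleanup_queue(example_queue);
}

module_init(example_init);
module_exit(example_exit);
#endif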

request_queue_t *
blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
{
	request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

	if (!q)
		return NULL;

	q->node = node_id;
	if (blk_init_free_list(q)) {
		kmem_cache_free(requestq_cachep, q);
		return NULL;
	}

	/*
	 * if caller didn't supply a lock, they get per-queue locking with
	 * our embedded lock
	 */
	if (!lock) {
		spin_lock_init(&q->__queue_lock);
		lock = &q->__queue_lock;
	}

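	/*
	 * install the driver's hooks and conservative default limits;
	 * drivers can override the limits below with the blk_queue_*
	 * helpers after this function returns
	 */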
	q->request_fn		= rfn;
	q->prep_rq_fn		= NULL;
	q->unplug_fn		= generic_unplug_device;
	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
	q->queue_lock		= lock;

	blk_queue_segment_boundary(q, 0xffffffff);

	blk_queue_make_request(q, __make_request);
	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);

	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);

	q->sg_reserved_size = INT_MAX;

	/*
	 * all done: attach the default i/o scheduler and derive the
	 * congestion thresholds from the queue depth
	 */
	if (!elevator_init(q, NULL)) {
		blk_queue_congestion_threshold(q);
		return q;
	}

	blk_put_queue(q);
	return NULL;
}
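
/*
 * Illustrative sketch, not part of the original file: calling the
 * NUMA-aware variant directly.  "node" would come from the caller's
 * device topology; passing -1, as blk_init_queue() does above, means
 * no node preference.  A NULL lock selects the embedded per-queue
 * __queue_lock path above.  example_request_fn is hypothetical.
 */
#if 0
static request_queue_t *example_alloc_queue_on(int node)
{
	return blk_init_queue_node(example_request_fn, NULL, node);
}
#endif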