intgpiochip_add_data(struct gpio_chip *chip,void*data){/*
* TODO: this allocates a Linux GPIO number base in the global
* GPIO numberspace for this chip. In the long run we want to
* get *rid* of this numberspace and use only descriptors, but
* it may be a pipe dream. It will not happen before we get rid
* of the sysfs interface anyways.
*/if(base <0){
base =gpiochip_find_base(chip->ngpio);}/* dynamic allocation of GPIOs, e.g. on a hotplugged device */staticintgpiochip_find_base(int ngpio){int base = ARCH_NR_GPIOS - ngpio;list_for_each_entry_reverse(gdev,&gpio_devices, list){/* found a free space? */if(gdev->base + gdev->ngpio <= base)break;else/* nope, check the space right before the chip */
base = gdev->base - ngpio;}if(gpio_is_valid(base)){pr_debug("%s: found new base at %d\n",__func__, base);return base;}}
/*
 * Core poll(2) loop: scan every pollfd in @list until at least one event
 * fires, a signal is pending, or the timeout in @end_time expires.
 *
 * @list:     chain of poll_list chunks holding the user's pollfd array
 * @wait:     per-call poll_wqueues; wait->pt._qproc registers waiters on
 *            the first pass and is then cleared
 * @end_time: absolute expiry, or NULL to block indefinitely
 *
 * Returns the number of descriptors with events, 0 on timeout, or a
 * negative error (-EINTR on signal, or wait->error from registration).
 */
static int do_poll(struct poll_list *list, struct poll_wqueues *wait,
		   struct timespec64 *end_time)
{
	poll_table* pt = &wait->pt;
	ktime_t expire, *to = NULL;
	int timed_out = 0, count = 0;
	u64 slack = 0;
	/* opt into network busy-polling only if it is globally enabled */
	unsigned int busy_flag = net_busy_loop_on() ? POLL_BUSY_LOOP : 0;
	unsigned long busy_start = 0;

	/* Optimise the no-wait case */
	if (end_time && !end_time->tv_sec && !end_time->tv_nsec) {
		/* zero timeout: never register waiters, do a single pass */
		pt->_qproc = NULL;
		timed_out = 1;
	}

	/* estimate how much timer slack this timeout tolerates */
	if (end_time && !timed_out)
		slack = select_estimate_accuracy(end_time);

	for (;;) {
		struct poll_list *walk;
		bool can_busy_loop = false;

		/* walk every chunk of the pollfd list */
		for (walk = list; walk != NULL; walk = walk->next) {
			struct pollfd * pfd, * pfd_end;

			pfd = walk->entries;
			pfd_end = pfd + walk->len;
			/* iterate over every fd in this chunk */
			for (; pfd != pfd_end; pfd++) {
				/*
				 * Fish for events. If we found one, record it
				 * and kill poll_table->_qproc, so we don't
				 * needlessly register any other waiters after
				 * this. They'll get immediately deregistered
				 * when we break out and return.
				 */
				if (do_pollfd(pfd, pt, &can_busy_loop,
					      busy_flag)) {
					count++;
					pt->_qproc = NULL;
					/* found something, stop busy polling */
					busy_flag = 0;
					can_busy_loop = false;
				}
			}
		}
		/*
		 * All waiters have already been registered, so don't provide
		 * a poll_table->_qproc to them on the next loop iteration.
		 */
		pt->_qproc = NULL;
		if (!count) {
			/* surface any error recorded while registering waiters */
			count = wait->error;
			if (signal_pending(current))
				count = -EINTR;
		}
		if (count || timed_out)
			break;

		/* only if found POLL_BUSY_LOOP sockets && not out of time */
		if (can_busy_loop && !need_resched()) {
			if (!busy_start) {
				/* first busy-poll pass: start the clock */
				busy_start = busy_loop_current_time();
				continue;
			}
			if (!busy_loop_timeout(busy_start))
				continue;
		}
		/* busy-poll window exhausted; fall back to sleeping */
		busy_flag = 0;

		/*
		 * If this is the first loop and we have a timeout
		 * given, then we convert to ktime_t and set the to
		 * pointer to the expiry value.
		 */
		if (end_time && !to) {
			expire = timespec64_to_ktime(*end_time);
			to = &expire;
		}

		/* sleep until an event, a signal, or the deadline */
		if (!poll_schedule_timeout(wait, TASK_INTERRUPTIBLE, to, slack))
			timed_out = 1;
	}
	return count;
}
/*
 * Fish for pollable events on the pollfd->fd file descriptor. We're only
 * interested in events matching the pollfd->events mask, and the result
 * matching that mask is both recorded in pollfd->revents and returned. The
 * pwait poll_table will be used by the fd-provided poll handler for waiting,
 * if pwait->_qproc is non-NULL.
 */
static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait,
				     bool *can_busy_poll,
				     unsigned int busy_flag)
{
	unsigned int mask = 0;
	int fd = pollfd->fd;

	/* negative fds are legal in poll(2): skip them, report no events */
	if (fd >= 0) {
		struct fd f = fdget(fd);

		if (!f.file) {
			/* fd is not open: flag it as invalid */
			mask = POLLNVAL;
		} else {
			mask = DEFAULT_POLLMASK;
			if (f.file->f_op->poll) {
				/*
				 * Tell the driver which events we care
				 * about; POLLERR/POLLHUP are always
				 * reportable, and busy_flag asks busy-poll
				 * capable sockets to identify themselves.
				 */
				pwait->_key = pollfd->events | POLLERR |
					      POLLHUP | busy_flag;
				mask = f.file->f_op->poll(f.file, pwait);
				if (mask & busy_flag)
					*can_busy_poll = true;
			}
			/* Mask out unneeded events. */
			mask &= pollfd->events | POLLERR | POLLHUP;
			fdput(f);
		}
	}

	pollfd->revents = mask;
	return mask;
}