df.c source code (BusyBox mini df implementation)

 
http://www.koders.com/c/fid9C541A435B215354EA5310CDAE1382D4655E961D.aspx
 
 
/* vi: set sw=4 ts=4: */
/*
* Mini df implementation for busybox
*
* Copyright (C) 1999,2000 by Lineo, inc.
* Written by Erik Andersen <andersen@lineo.com>, <andersee@debian.org>
* based on original code by (I think) Bruce Perens <bruce@pixar.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/

#include "internal.h"
#include <stdio.h>
#include <mntent.h>
#include <sys/stat.h>
#include <sys/vfs.h>

/* Usage text shown by usage(); the long description is compiled in
 * only when BB_FEATURE_TRIVIAL_HELP is not defined. */
static const char df_usage[] = "df [filesystem ...]\n"
#ifndef BB_FEATURE_TRIVIAL_HELP
"\nPrint the filesystem space used and space available.\n"
#endif
;

/* Path of the mounted-filesystem table (e.g. /etc/mtab). */
extern const char mtab_file[]; /* Defined in utility.c */

/*
 * Print one df(1) report line for a single mounted filesystem.
 *
 * device     - block device name; rewritten in place when it is
 *              "/dev/root" and the real root device can be found
 * mountPoint - directory the filesystem is mounted on
 *
 * Returns TRUE on success, FALSE if statfs() fails (with the error
 * reported via perror()). Filesystems reporting zero total blocks
 * (e.g. proc) are silently skipped and still count as success.
 */
static int df(char *device, const char *mountPoint)
{
	struct statfs s;
	long blocks_used;
	long blocks_percent_used;

	if (statfs(mountPoint, &s) != 0) {
		perror(mountPoint);
		return FALSE;
	}

	if (s.f_blocks > 0) {
		blocks_used = s.f_blocks - s.f_bfree;
		/* Guard the denominator: on a completely full filesystem
		 * whose remaining blocks are all reserved for root,
		 * blocks_used + f_bavail can be 0, and dividing by it is
		 * undefined behavior. Report 100% in that case. */
		if (blocks_used + s.f_bavail > 0) {
			blocks_percent_used = (long)
				(blocks_used * 100.0 / (blocks_used + s.f_bavail) + 0.5);
		} else {
			blocks_percent_used = 100;
		}
		if (strcmp(device, "/dev/root") == 0) {
			/* Adjusts device to be the real root device,
			 * or leaves device alone if it can't find it */
			find_real_root_device_name(device);
		}
		/* Sizes are converted from f_bsize units to 1K blocks. */
		printf("%-20s %9ld %9ld %9ld %3ld%% %s\n",
			   device,
			   (long) (s.f_blocks * (s.f_bsize / 1024.0)),
			   (long) ((s.f_blocks - s.f_bfree) * (s.f_bsize / 1024.0)),
			   (long) (s.f_bavail * (s.f_bsize / 1024.0)),
			   blocks_percent_used, mountPoint);
	}

	return TRUE;
}

/*
 * Entry point for the df applet.
 *
 * With filesystem arguments: report each named mount point, looked up
 * in the mtab file. Without arguments: report every entry in the
 * mounted-filesystem table.
 *
 * Exits FALSE on any lookup or statfs failure, TRUE otherwise.
 */
extern int df_main(int argc, char **argv)
{
	printf("%-20s %-14s %s %s %s %s\n", "Filesystem",
		   "1k-blocks", "Used", "Available", "Use%", "Mounted on");

	if (argc > 1) {
		struct mntent *mountEntry;

		if (**(argv + 1) == '-') {
			usage(df_usage);
		}
		while (argc > 1) {
			if ((mountEntry = findMountPoint(argv[1], mtab_file)) == 0) {
				fprintf(stderr, "%s: can't find mount point.\n", argv[1]);
				exit(FALSE);
			}
			/* BUG FIX: df() returns TRUE (non-zero) on success, so the
			 * old "if (status != 0) exit(status);" aborted after the
			 * FIRST successful argument and only that filesystem was
			 * ever reported. Bail out only on failure. */
			if (df(mountEntry->mnt_fsname, mountEntry->mnt_dir) == FALSE)
				exit(FALSE);
			argc--;
			argv++;
		}
		exit(TRUE);
	} else {
		FILE *mountTable;
		struct mntent *mountEntry;

		mountTable = setmntent(mtab_file, "r");
		if (mountTable == 0) {
			perror(mtab_file);
			exit(FALSE);
		}

		while ((mountEntry = getmntent(mountTable))) {
			df(mountEntry->mnt_fsname, mountEntry->mnt_dir);
		}
		endmntent(mountTable);
	}

	return(TRUE);
}

/*
Local Variables:
c-file-style: "linux"
c-basic-offset: 4
tab-width: 4
End:
*/

  • 0
    点赞
  • 1
    收藏
    觉得还不错? 一键收藏
  • 0
    评论
以下是一个使用 Python 进行爬虫和数据分析的招聘网站源码示例: ```python import requests from bs4 import BeautifulSoup import pandas as pd # 定义爬虫函数 def get_job_list(url): response = requests.get(url) soup = BeautifulSoup(response.text, 'html.parser') jobs = soup.find_all('div', {'class': 'job-primary'}) job_list = [] for job in jobs: try: title = job.find('div', {'class': 'job-title'}).text.strip() except: title = '' try: salary = job.find('span', {'class': 'red'}).text.strip() except: salary = '' try: company = job.find('div', {'class': 'company-text'}).a.text.strip() except: company = '' try: location = job.find('div', {'class': 'job-address'}).text.strip() except: location = '' job_list.append([title, salary, company, location]) return job_list # 爬取数据 url = 'https://www.zhipin.com/c100010000/?query=Python&page={}' job_list = [] for i in range(1, 11): url_page = url.format(i) job_list += get_job_list(url_page) # 转换数据为 DataFrame 格式 df = pd.DataFrame(job_list, columns=['Title', 'Salary', 'Company', 'Location']) # 数据清洗和处理 df.drop_duplicates(inplace=True) df.reset_index(drop=True, inplace=True) df['Salary_min'] = df.Salary.apply(lambda x: x.split('-')[0] if '-' in x else x) df['Salary_max'] = df.Salary.apply(lambda x: x.split('-')[1] if '-' in x else x) df['Salary_min'] = df.Salary_min.apply(lambda x: x.replace('K', '000').replace('以上', '')) df['Salary_max'] = df.Salary_max.apply(lambda x: x.replace('K', '000').replace('以上', '')) df['Salary_min'] = pd.to_numeric(df.Salary_min) df['Salary_max'] = pd.to_numeric(df.Salary_max) df['Salary_avg'] = (df.Salary_min + df.Salary_max) / 2 df['Location'] = df.Location.apply(lambda x: x.split('·')[0]) # 数据分析 location_group = df.groupby('Location')['Title'].count().reset_index().sort_values(by='Title', ascending=False) salary_group = df.groupby('Location')['Salary_avg'].mean().reset_index().sort_values(by='Salary_avg', ascending=False) # 结果输出 print('地区招聘数量排名:\n', location_group.head(10)) print('\n地区薪资水平排名:\n', salary_group.head(10)) ``` 这个示例使用 `requests` 和 
`BeautifulSoup` 库进行网页爬取和解析,然后使用 `pandas` 库将数据转换为 DataFrame 格式进行清洗和处理,最后使用 DataFrame 的 groupby() 函数进行数据分析。输出结果为地区招聘数量排名和地区薪资水平排名。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值