第一步配置pom依赖
<properties>
<mysql.version>5.1.38</mysql.version>
<mybatis-plus.version>3.4.2</mybatis-plus.version>
<druid.version>1.1.9</druid.version>
</properties>
<dependencies>
<!-- https://mvnrepository.com/artifact/junit/junit -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13.1</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-web</artifactId>
</dependency>
<dependency>
<groupId>com.alibaba.cloud</groupId>
<artifactId>spring-cloud-starter-alibaba-nacos-discovery</artifactId>
</dependency>
<dependency>
<groupId>mysql</groupId>
<artifactId>mysql-connector-java</artifactId>
</dependency>
<dependency>
<groupId>com.baomidou</groupId>
<artifactId>mybatis-plus-boot-starter</artifactId>
<version>${mybatis-plus.version}</version>
</dependency>
<dependency>
<groupId>com.alibaba</groupId>
<artifactId>druid-spring-boot-starter</artifactId>
<version>${druid.version}</version>
</dependency>
<dependency>
<groupId>org.projectlombok</groupId>
<artifactId>lombok</artifactId>
</dependency>
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-data-redis</artifactId>
</dependency>
</dependencies>
第二步配置yml
server: # 本机端口号 port: 9094 spring: application: name: hr # 配置nacos服务注册中心 cloud: nacos: discovery: server-addr: 192.168.64.138:8848 username: nacos password: nacos namespace: public # 配置德鲁伊数据源 datasource: druid: url: jdbc:mysql://192.168.64.138:3306/tc_hr?useSSL=false&serverTimezone=Asia/Shanghai&characterEncoding=utf-8&autoReconnect=true username: root password: 3090_Cmok #初始化时建立物理连接的个数。初始化发生在显示调用init方法,或者第一次getConnection时 initial-size: 3 # #最大连接池数量 max-active: 14 #最小连接池数量 min-idle: 3 # 获取连接时最大等待时间,单位毫秒。配置了maxWait之后,缺省启用公平锁, # 并发效率会有所下降,如果需要可以通过配置useUnfairLock属性为true使用非公平锁。 max-wait: 60000 #有两个含义: #1: Destroy线程会检测连接的间隔时间 #2: testWhileIdle的判断依据,详细看testWhileIdle属性的说明 time-between-eviction-runs-millis: 60000 #配置一个连接在池中最小生存的时间,单位是毫秒 min-evictable-idle-time-millis: 180000 #用来检测连接是否有效的sql,要求是一个查询语句。如果validationQuery为null,testOnBorrow、testOnReturn、testWhileIdle都不会其作用。 validation-query: select 'x' #连接有效性检查的超时时间 1 秒 validation-query-timeout: 1 #申请连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能。 test-on-borrow: false #设置从连接池获取连接时是否检查连接有效性,true时,如果连接空闲时间超过minEvictableIdleTimeMillis进行检查,否则不检查;false时,不检查 test-while-idle: true #归还连接时执行validationQuery检测连接是否有效,做了这个配置会降低性能 test-on-return: false #是否缓存preparedStatement,也就是PSCache。PSCache对支持游标的数据库性能提升巨大,比如说oracle。在mysql下建议关闭。 pool-prepared-statements: true #要启用PSCache,必须配置大于0,当大于0时,poolPreparedStatements自动触发修改为true。在Druid中, # 不会存在Oracle下PSCache占用内存过多的问题,可以把这个数值配置大一些,比如说100 max-open-prepared-statements: 20 #数据库链接超过3分钟开始关闭空闲连接 秒为单位 remove-abandoned-timeout: 1800 #对于长时间不使用的连接强制关闭 remove-abandoned: true #打开后,增强timeBetweenEvictionRunsMillis的周期性连接检查,minIdle内的空闲连接, # 每次检查强制验证连接有效性. 
参考:https://github.com/alibaba/druid/wiki/KeepAlive_cn keep-alive: true # 通过connectProperties属性来打开mergeSql功能;慢SQL记录 connect-properties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000 #是否超时关闭连接 默认为false ,若为true 就算数据库恢复连接,也无法连接上 break-after-acquire-failure: false #设置获取连接出错时的自动重连次数 connection-error-retry-attempts: 1 # 设置获取连接出错时是否马上返回错误,true为马上返回 fail-fast: true #属性类型是字符串,通过别名的方式配置扩展插件,常用的插件有: #监控统计用的filter:stat日志用的filter:log4j防御sql注入的filter:wall filters: stat,wall # 连接redis数据库 redis: host: 192.168.64.138 port: 6379 jedis: pool: max-active: 30 max-idle: 5 max-wait: -1 min-idle: 5 # 密码 password: ok # 配置日志 mybatis-plus: configuration: log-impl: org.apache.ibatis.logging.stdout.StdOutImpl # 配置mybatis-plus逻辑删除 global-config: db-config: # 删除的字段 logic-delete-field: deleted # 删除 logic-delete-value: 1 # 不删除 logic-not-delete-value: 0
第三步配置 实体类和工具类
//员工信息实体类 @Data @AllArgsConstructor @NoArgsConstructor @Builder public class Employees { @TableId(type = IdType.ASSIGN_ID) private Long empid; private Integer jobid; private String empname; private Integer age; private String telephone; private Integer gender; private Date entertime; private Integer depid; //配置逻辑删除 // @TableLogic(value = "0",delval = "1") private Integer deleted; //重写string方法 对应redis k-v值得格式 @Override public String toString() { SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd"); return empid+","+empname+","+depid+","+age+","+gender+","+jobid+","+sdf.format(entertime)+","+telephone; } } //员工信息实体类视图 @Data @AllArgsConstructor @NoArgsConstructor @Builder public class EmployeesVO { @TableId(type = IdType.ASSIGN_ID) private Long empid; private Integer jobid; private Integer depid; private String empname; private Integer age; private String telephone; private Integer gender; private Date entertime; private String token; } //员工登录实体类 @Data @AllArgsConstructor @NoArgsConstructor @Builder public class Syslogins { @TableId(type = IdType.ASSIGN_ID) private Long slid; private Long empid; private String loginname; private String loginpwd; private Integer deleted; private Integer version; } //员工登录实体类视图 @Data @AllArgsConstructor @NoArgsConstructor @Builder public class EmpSysVO { private Integer jobid; private String empname; private Integer age; private String telephone; private Integer gender; private Date entertime; private Integer depid; private String loginname ; private String loginpwd; }
//工具类 用于生成随机token值
public class TokenGenerator { //构造器私有 private TokenGenerator(){} //单例模式 饿汉式 private static TokenGenerator generator =new TokenGenerator(); //返回唯一一个对象 public static TokenGenerator getInstance(){ return generator; } //对传入的empid 生成随机tokenid public String getToken(String empid){ //时间戳 long timestamp = new Date().getTime(); //使用MD5码计算用户id String userCode = MD5Utils.md5Hex(empid ,"UTF-8"); return timestamp+userCode; } }
第四步配置controller层----------注册
/** REST endpoint for user registration. */
@RestController
@RequestMapping("/authreg")
public class RegCtrl {

    @Resource
    private RegisterService registerService;

    /**
     * Registers a new employee/login pair.
     *
     * @param evo registration payload (profile + credentials)
     * @return "ok" once the registration transaction completes
     */
    @PostMapping("/reguser")
    public String regtest(@RequestBody EmpSysVO evo) {
        registerService.register(evo);
        return "ok";
    }
}
第五步配置service层----------注册
//接口
/** Registration use-case contract. */
public interface RegisterService {

    /**
     * Persists the employee and login records and caches the employee
     * in Redis.
     *
     * @param esv registration payload (profile + credentials)
     */
    void register(EmpSysVO esv);
}
// implementation class
@Service public class RegisterServiceImpl implements RegisterService { @Resource private EmployeesMapper empMapper; @Resource private SysloginsMapper sysloginsMapper; @Resource private RedisTemplate<String,String> template; @Transactional @Override public void register(EmpSysVO esv) { String md5Pwd = MD5Utils.md5Hex(esv.getLoginpwd(),"UTF-8"); //向数据库存放数据 //创建Employees对象 Employees emp = new Employees(); //使用beanutils复制到目标对象Employees BeanUtils.copyProperties(esv,emp); //设置delete属性为0 代表未删除 emp.setDeleted(0); //1 调用mp 向Employees增加对象 empMapper.insert(emp); // System.out.println(emp.getEmpid()); //创建登录Syslogins对象 填入账号密码 密码加密 empid从Employees对象主键雪花算法获取 //delete为0 代表没删除 version=1 Syslogins sys = Syslogins.builder().loginname(esv.getLoginname()).loginpwd(md5Pwd). empid(emp.getEmpid()).deleted(0).version(1). build(); //2 调用mp 向Syslogins增加对象 sysloginsMapper.insert(sys); //3 向缓存数据库redis存放数据 //k 用的是账号加密码 //v 用的是Employees重写的tostring方法 格式如下: // return empid+","+empname+","+depid+","+age+","+gender+","+jobid+","+sdf.format(entertime)+","+telephone; template.opsForHash().put("users",esv.getLoginname()+ md5Pwd,emp.toString()); } }
第6步配置mapper层----------注册
//mapper实现mysql-puls接口 实现CRUD @Mapper public interface EmployeesMapper extends BaseMapper<Employees> { }@Mapper public interface SysloginsMapper extends BaseMapper<Syslogins> { }
测试查询结果!
第七步配置controller层----------登录
@RestController @RequestMapping("/authlogin") public class LoginCtrl { @Resource private LoginService loginService; @PostMapping(value ="/user_login") public EmployeesVO login(@RequestParam("username")String username, @RequestParam("pwd")String pwd){ // EmployeesVO evo = EmployeesVO.builder().empid(1).jobid(1).empname("张三").telephone("13012345678").age(30) // .entertime(new Date()).gender(1).token("1234567890").build(); EmployeesVO evo = loginService.user_login(username, pwd); return evo; } /** * 使用token进行登录 * @param token * @return */ @PostMapping("/token_login") public EmployeesVO token_login(@RequestHeader("token") String token){ return loginService.token_login(token); } }
第八步配置service层----------登录
//接口
/** Login use-case contract. */
public interface LoginService {

    /** Password login: returns the employee view on success, null otherwise. */
    EmployeesVO user_login(String loginname, String loginpwd);

    /** Token login: resolves a previously issued token back to the employee view. */
    EmployeesVO token_login(String token);
}
// implementation class
@Service @Transactional public class LoginServiceImpl implements LoginService { // @Resource // private SysloginsMapper sysMapper; // @Resource // private EmployeesMapper empMapper; @Resource private StringRedisTemplate templatess; @Override public EmployeesVO user_login(String loginname, String loginpwd) { //密码进行md5加密 String md5 = MD5Utils.md5Hex(loginpwd, "UTF-8"); //使用RedisTemplate操作redis数据库 调用opsForHash()方法代表使用hash // 去redis里查数据 判断有没有用户 Boolean hasUser = templatess.opsForHash().hasKey("users", loginname + md5); //如何有对象 if(hasUser){ //获取账号密码对应的值 String users = templatess.opsForHash().get("users", loginname + md5).toString(); //users.split(",")[0] 获取这个数组第一个数据 也就是empid String token = TokenGenerator.getInstance().getToken(users.split(",")[0]); //再向redis中存放一个token + 用户名+密码 并设置过期时间 //为了避开缓存雪崩使用在正常规定失效的时间后追加一个随机时间让所有用户分开失效 Random rand = new Random(); //1800=30分钟失效 templatess.opsForValue().set(token,loginname+md5, Duration.ofSeconds(1800+rand.nextInt(1000))); System.err.println(templatess.opsForValue().get(token)); //调用下面的buildEmpByStr方法返回对象 return buildEmpByStr(users,token); } return null; } @Override public EmployeesVO token_login(String token) { String useradnpwd = templatess.opsForValue().get(token).toString(); String users = templatess.opsForHash().get("users", useradnpwd).toString(); return buildEmpByStr(users,token); } private EmployeesVO buildEmpByStr(String users,String token){ //分割字符串填充EmpLoyeesVO String[] infos = users.split(","); SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd"); EmployeesVO evo = null; try { evo = EmployeesVO.builder(). empid(Long.parseLong(infos[0])) .empname(infos[1]) .depid(Integer.parseInt(infos[2])) .age(Integer.parseInt(infos[3])) .gender(Integer.parseInt(infos[4])) .jobid(Integer.parseInt(infos[5])) .entertime(sdf.parse(infos[6])) .telephone(infos[7]) .token(TokenGenerator.getInstance().getToken(infos[0])) .build(); } catch (ParseException e) { e.printStackTrace(); } return evo; } }
第九步配置controller层----------注销
// Endpoints for destroying (deleting) a user account.
@RestController
@RequestMapping("/authdestroy")
public class DestoryCtrl {

    @Resource
    private DestoryService destoryService;

    // Deletes the login row, logic-deletes the employee row and evicts
    // the Redis cache entry for the given employee id.
    @GetMapping("/destroy/user/{empid}")
    public String destoryUser(@PathVariable("empid")Long empid){
        destoryService.destory_user(empid);
        return "SUCCESS";
    }

    // NOTE(review): this POST mapping reuses the "/destroy/user/{empid}"
    // template but the method takes no @PathVariable, so {empid} is
    // ignored. Looks like a copy-paste of the mapping above — confirm the
    // intended path (e.g. "/find10") before clients depend on it.
    @PostMapping("/destroy/user/{empid}")
    public List<Employees> find10(){
        return destoryService.find10();
    }
}
第十步配置service层----------注销
@Service public class DestoryServiceImpl implements DestoryService { @Resource private SysloginsMapper sysMapper; @Resource private EmployeesMapper employeesMapper; @Resource private StringRedisTemplate template; @Transactional @Override public void destory_user(Long empid) { //查询用户的用户名和密码 LambdaQueryWrapper<Syslogins> lqw = new LambdaQueryWrapper<>(); lqw.select(Syslogins::getLoginname,Syslogins::getLoginpwd,Syslogins::getSlid).eq(Syslogins::getEmpid,empid); //通过账号密码找出实体类Syslogins Syslogins loginUser = sysMapper.selectOne(lqw); //1 删除Syslogins对象 sysMapper.deleteById(loginUser.getSlid()); //2 删除mysql数据库Employees对象 逻辑删除 employeesMapper.deleteById(empid); //3 redis删除 在销毁数据库用户的同时 删除缓存数据库中对应的实体类Syslogins对象 template.opsForHash().delete("users",loginUser.getLoginname()+loginUser.getLoginpwd()); } public List<Employees> find10(){ LambdaQueryWrapper<Employees> lqw = new LambdaQueryWrapper<>(); //查出最后10条数据 sql拼接 有可能会sql注入 lqw.last("limit 10"); return employeesMapper.selectList(lqw); } }
mapper层 在第六步就写过了 这里就不再重复了
接口测试!!!
同理redis里的数据也直接删除了!!!