======20150731
***1 (count Java source code lines)
tomcat6 source code
count_files=1069
count_lines=119385
count_lines_raw=329157
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.regex.Pattern;
/**
 * Counts "effective" lines of Java source code under a directory tree.
 * <p>
 * Recursively walks {@code sourcepath}, and for every {@code *.java} file counts
 * the lines that are NOT blank and NOT matched by any pattern in
 * {@code pattern_group} (comment markers, lone braces, import/package lines).
 * Totals are accumulated in static counters and printed from {@code main}.
 * <p>
 * Known quirk kept for compatibility: {@code pattern2} ("\\*") skips ANY line
 * containing '*', so lines using '*' as multiplication are also excluded.
 */
public class ListAllFile {
    // Totals accumulated across every file processed in this run.
    private static int count_files = 0;     // number of .java files seen
    private static int count_lines = 0;     // "effective" code lines
    private static int count_lines_raw = 0; // all lines, including skipped ones

    // A (trimmed) line matching any pattern in pattern_group is not counted.
    Pattern pattern1 = Pattern.compile("/\\*\\*");       // javadoc start
    Pattern pattern2 = Pattern.compile("\\*");           // comment continuation (any '*')
    Pattern pattern3 = Pattern.compile("\\*/");          // block comment end
    Pattern pattern4 = Pattern.compile("//");            // line comment
    // File-name filter, used separately from pattern_group. Anchored with '$'
    // so "Foo.javax" or "Foo.java.bak" no longer match (was unanchored).
    Pattern pattern5 = Pattern.compile("\\.java$");
    Pattern pattern6 = Pattern.compile("^\\}");          // line that is just a closing brace
    Pattern pattern7 = Pattern.compile("^\\{");          // line that is just an opening brace
    Pattern pattern8 = Pattern.compile("import[\\s]+?"); // import statement
    Pattern pattern9 = Pattern.compile("package[\\s]+?");// package statement
    Pattern[] pattern_group = new Pattern[] { pattern1, pattern2, pattern3, pattern4,
            pattern6, pattern7, pattern8, pattern9 };

    private static String sourcepath = "";

    public static void main(String[] args) throws Exception {
        sourcepath = "E:\\mypc_svn\\local_svn\\sourcestudy\\tomcat6_tmp\\src\\main\\java";
        StringBuffer sb = new StringBuffer();
        new ListAllFile().listFile(new File(sourcepath), sb);
        System.out.println(sb.toString());
        System.out.println("count_files=" + count_files);
        System.out.println("count_lines=" + count_lines);
        System.out.println("count_lines_raw=" + count_lines_raw);
    }

    /**
     * Recursively visits {@code file}; for every *.java file found, appends one
     * "relativePath,lineCount" line to {@code sb}. Directories named .svn are skipped.
     */
    public void listFile(File file, StringBuffer sb) throws Exception {
        if (file.isDirectory()) {
            File[] children = file.listFiles();
            if (children == null) {
                // listFiles() returns null on I/O error or missing permission.
                return;
            }
            for (File f : children) {
                if (!f.getCanonicalPath().endsWith(".svn")) {
                    listFile(f, sb);
                }
            }
        } else if (pattern5.matcher(file.getName()).find()) {
            sb.append(getFileInfo(file));
            sb.append("\n");
        }
    }

    /**
     * Counts the effective code lines of one file and updates the static totals.
     *
     * @return "relativePath,effectiveLineCount" for the file
     * @throws Exception if the file cannot be read
     */
    public String getFileInfo(File file) throws Exception {
        count_files++;
        String shortName = file.getCanonicalPath().replace(sourcepath, "");
        int count = 0;
        // NOTE: FileReader uses the platform default charset, as before.
        BufferedReader br = new BufferedReader(new FileReader(file));
        try {
            String line;
            while ((line = br.readLine()) != null) {
                line = line.trim();
                boolean skip = line.length() == 0 || checkWithPatternGroup(line, pattern_group);
                if (!skip) {
                    count_lines++;
                    count++;
                }
                count_lines_raw++;
                // printDebug_1(line, skip, count);
            }
        } finally {
            br.close(); // the reader was leaked in the original version
        }
        return shortName + "," + count;
    }

    /** @return true if {@code line} matches (find) any pattern in {@code patterns}. */
    private boolean checkWithPatternGroup(String line, Pattern[] patterns) {
        for (Pattern p : patterns) {
            if (p.matcher(line).find()) {
                return true;
            }
        }
        return false;
    }

    /** Debug helper: echoes a line and, if it was counted, its running count. */
    private void printDebug_1(String str, boolean skip, int count) {
        System.out.print(str);
        if (!skip) {
            System.out.print("------>" + count);
        }
        System.out.println("");
    }
}
For example
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.tomcat.util.threads;
/** Implemented if you want to run a piece of code inside a thread pool.
*/
public interface ThreadPoolRunnable {------>1
// XXX use notes or a hashtable-like
// Important: ThreadData in JDK1.2 is implemented as a Hashtable( Thread -> object ),
// expensive.
/** Called when this object is first loaded in the thread pool.
* Important: all workers in a pool must be of the same type,
* otherwise the mechanism becomes more complex.
*/
public Object[] getInitData();------>2
/** This method will be executed in one of the pool's threads. The
* thread will be returned to the pool.
*/
public void runIt(Object thData[]);------>3
}
======20150730
***1
When processing a request, may an HTTP header have one or more spaces after ":"? Right? I am not sure.
Host: localhost:8080 --> please note there is a space after ":"
When Tomcat processes it, it moves "localhost:8080" one position earlier in the buffer; that means after processing it becomes "Host:localhost:80800" --> please note it is "80800"
org.apache.coyote.http11.InternalInputBuffer.
while (!eol) {
// Read new bytes if needed
if (pos >= lastValid) {
if (!fill())
throw new EOFException(sm.getString("iib.eof.error"));
}
if (buf[pos] == Constants.CR) {
} else if (buf[pos] == Constants.LF) {
eol = true;
} else if (buf[pos] == Constants.SP) {
buf[realPos] = buf[pos];
realPos++;
} else {
buf[realPos] = buf[pos]; -----> this code
realPos++;
lastSignificantChar = realPos;
}
pos++;
}
Why? I don't know.
In order to solve it, I change
headerValue.setBytes(buf, start, realPos - start);
buf[realPos] = Constants.SP; ----> add this code
I need to check why.
for example
before
POST /examples/jsp/jsp2/el/basic-arithmetic.jsp HTTP/1.1
user-agent:Java/1.6.0_166
host:localhost:80800
accept:text/html, image/gif, image/jpeg, *; q=.2, */*; q=.22
connection:keep-alivee
content-type:application/x-www-form-urlencodedd
content-length:244
after
POST /examples/jsp/jsp2/el/basic-arithmetic.jsp HTTP/1.1
user-agent:Java/1.6.0_16
host:localhost:8080
accept:text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2
connection:keep-alive
content-type:application/x-www-form-urlencoded
content-length:24
======20150728
***1
http://blog.csdn.net/sunzhenhua0608/article/details/7628663
char在Java中是16位的,因为Java用的是Unicode
String str= "中";
char x ='中';
byte[] bytes=str.getBytes("utf-8");;
byte[] bytes1= charToByte(x);
运行结果:
bytes 大小:3
bytes1大小:2
java是用unicode来表示字符,"中"这个中文字符的unicode就是2个字节。
通常gbk/gb2312是2个字节,utf-8是3个字节。
***2
http://blog.csdn.net/sunzhenhua0608/article/details/7628746
按文本方式比如ASCII码形式解释,就把读到的二进制流每8比特8比特的翻译,
文本文件是字符的序列构成的,二进制文件是由位的序列构成的。例如,十进制整数199在文本文件中是以三个字符序列‘1’、‘9’、‘9’来存储的,total 3 bytes, 而在二进制文件中它是以byte类型的值C7存储的, total 4 bytes, because int has 4 bytes.
而从字符流中读取一个字符时,读取几个字节依赖于编码系统, ACSII码占8位,Java使用的是16位的Unicode码。当一个统一码无法转换为ACSII码时,转换为字符‘?’。例如,如果想把统一码‘u03b1’写到字符流中,送入到字符流的是数值63(表示字符‘?’)。
Java程序使用的是统一码(Unicode),从FileReader流中读取一个字符时,返回该字符的统一码。字符在文件中的编码可能不是统一码,Java自动将统一码转换为文件指定的编码
======20150727
***1
tomcat 性能之谜 (http://blog.csdn.net/mindfloating/article/details/8730065)
一开始我们对tomcat容器性能不佳的猜测经实测数据验证是错误的,tomcat本身性能并无问题,有问题的是我们采用的应用开发模式,甚至包括我们选择的一些应用开发框架
首先,实现了一个EchoServlet,它返回固定1k左右的字符串, 并且关闭了http的keep-alive机制,tomcatconnector 采用默认的bio模型。 测试结果还是挺让人吃惊的,tps均值达到了22000左右,Cpu usage: 180%
于是仅把struts引入,实现一个EchoAction
测试结果确实让人挺吃惊的,struts引入后tps下降为原来的一半,并且cpu消耗也上升了不少
Cpu usage: 280%
服务提供方的webservice接口基于cxf实现,和他们的应用一同部署在tomcat中,接口的逻辑很简单,数据传输量也很小(几百字节).调用接口的响应时间波动范围巨大,从数毫秒到数十秒之间,超时都是超过了30秒的情况。
======20150725
***1
http://www.th7.cn/Program/java/201411/310738.shtml
http://www.ibm.com/developerworks/cn/java/j-lo-jse63/
http://maimode.iteye.com/blog/1354377
http://www.knowsky.com/364099.html
http://www.cnblogs.com/itech/archive/2010/09/16/1827999.html
set CATALINA_OPTS=-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=8787 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false
在CMD中输入 : jconsole
service:jmx:rmi:///jndi/rmi://192.168.85.54:8787/jmxrmi
点开右边的MBean,我们可以获取其对应MBean的ObjectName,然后通过属性和做操的Key值获取想要的数据
Call a mbean by code
JMXConnector connector = null;
JMXServiceURL url = new JMXServiceURL(SERVICE_1);
connector = JMXConnectorFactory.connect(url);
MBeanServerConnection connection = connector.getMBeanServerConnection();
ObjectName objectName = new ObjectName("Catalina:type=Engine");
System.out.println("defaultHost:" + (String) connection.getAttribute(objectName, "defaultHost"));
CompositeDataSupport heapMemoryUsage = (CompositeDataSupport) connection.getAttribute(objectName,"HeapMemoryUsage");
objectName = new ObjectName("java.lang:type=Threading");
System.out.println("ThreadCount = " + connection.getAttribute(objectName, "ThreadCount"));// 守护线程
create a mbean
// 创建MBeanServer
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
// 新建MBean ObjectName, 在MBeanServer里标识注册的MBean
ObjectName name = new ObjectName("com.haitao.jmx:type=Echo");
// 创建MBean
Echo mbean = new Echo();
// 在MBeanServer里注册MBean, 标识为ObjectName(com.tenpay.jmx:type=Echo)
mbs.registerMBean(mbean, name);
// 在MBeanServer里调用已注册的EchoMBean的print方法
mbs.invoke(name, "print", new Object[] { "haitao.tu"}, new String[] {"java.lang.String"});
create a mbean with HtmlAdaptorServer
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();//可在jconsole中使用
//创建MBean
ControllerMBean controller = new Controller();
//将MBean注册到MBeanServer中
mbs.registerMBean(controller, new ObjectName("MyappMBean:name=controller"));
//创建适配器,用于能够通过浏览器访问MBean
HtmlAdaptorServer adapter = new HtmlAdaptorServer();
adapter.setPort(9797);
mbs.registerMBean(adapter, new ObjectName(
"MyappMBean:name=htmladapter,port=9797"));
adapter.start();
//由于上面的程序启用了html协议适配器,因此可以在浏览器中执行如同jconsole的操作,在浏览器中输入:http://localhost:9797即可
// Connects to a remote JMX server and dumps Tomcat/JVM metrics to stdout:
// Catalina engine/host info, heap and non-heap memory usage, thread counts,
// class-loading totals, and a CPU usage ratio sampled over a 5-second window.
// NOTE(review): the 'service' parameter is ignored - the constant SERVICE_1 is
// used for the connection instead; confirm whether that is intended.
private static void JMXmonitor(String service) {
    JMXConnector connector = null;
    try {
        JMXServiceURL url = new JMXServiceURL(SERVICE_1);
        System.out.println("url: " + SERVICE_1);
        connector = JMXConnectorFactory.connect(url);
        MBeanServerConnection connection = connector.getMBeanServerConnection();
        System.out.println("Catalina:");
        ObjectName objectName = new ObjectName("Catalina:type=Engine");
        System.out.println("defaultHost:" + (String) connection.getAttribute(objectName, "defaultHost"));
        System.out.println("name:" + (String) connection.getAttribute(objectName, "name"));
        System.out.println("baseDir:" + (String) connection.getAttribute(objectName, "baseDir"));
        connection.getAttribute(objectName, "valveObjectNames");
        objectName = new ObjectName("Catalina:type=Host,host=localhost");
        javax.management.ObjectName[] a = (javax.management.ObjectName[]) connection.getAttribute(objectName,
                "children");
        for (int i = 0; i < a.length; i++) {
            System.out.println(a[i].toString());
        }
        // Section headers below used the broken escape "/n"; fixed to "\n".
        System.out.println("\nMemory");
        System.out.println("memory: HeapMemoryUsage");
        objectName = new ObjectName("java.lang:type=Memory");
        CompositeDataSupport heapMemoryUsage = (CompositeDataSupport) connection.getAttribute(objectName,
                "HeapMemoryUsage");
        System.out.println("committed = " + convertKB(heapMemoryUsage.get("committed")));
        System.out.println("init = " + convertKB(heapMemoryUsage.get("init")));
        System.out.println("max = " + convertKB(heapMemoryUsage.get("max")));
        System.out.println("used = " + convertKB(heapMemoryUsage.get("used")));
        System.out.println("\nmemory: NonHeapMemoryUsage");
        CompositeDataSupport nonHeapMemoryUsage = (CompositeDataSupport) connection.getAttribute(objectName,
                "NonHeapMemoryUsage");
        System.out.println("committed = " + convertKB(nonHeapMemoryUsage.get("committed")));
        System.out.println("init = " + convertKB(nonHeapMemoryUsage.get("init")));
        System.out.println("max = " + convertKB(nonHeapMemoryUsage.get("max")));
        System.out.println("used = " + convertKB(nonHeapMemoryUsage.get("used")));
        System.out.println("\nThread");
        objectName = new ObjectName("java.lang:type=Threading");
        // total live thread count
        System.out.println("ThreadCount = " + connection.getAttribute(objectName, "ThreadCount"));
        // daemon thread count
        System.out.println("DaemonThreadCount = " + connection.getAttribute(objectName, "DaemonThreadCount"));
        // peak live thread count since JVM start or last peak reset
        System.out.println("PeakThreadCount = " + connection.getAttribute(objectName, "PeakThreadCount"));
        System.out.println(
                "CurrentThreadCpuTime = " + connection.getAttribute(objectName, "CurrentThreadCpuTime") + "ms");
        System.out.println(
                "CurrentThreadUserTime = " + connection.getAttribute(objectName, "CurrentThreadUserTime") + "ms");
        System.out.println("\nClassLoading");
        objectName = new ObjectName("java.lang:type=ClassLoading");
        System.out.println(
                "TotalLoadedClassCount = " + connection.getAttribute(objectName, "TotalLoadedClassCount") + "个");
        System.out.println("\nCpu");
        objectName = new ObjectName("java.lang:type=OperatingSystem");
        long start = System.currentTimeMillis();
        long startC = (long) connection.getAttribute(objectName, "ProcessCpuTime");
        try {
            TimeUnit.SECONDS.sleep(5);
        } catch (Exception e) {
            System.out.println("中断异常");
        }
        long end = System.currentTimeMillis();
        long endC = (long) connection.getAttribute(objectName, "ProcessCpuTime");
        // (end - start) is the sampling window in ms; (endC - startC) is process
        // CPU time consumed in that window in ns, so normalize by 1e6 and by the
        // processor count to get a 0..1 usage ratio.
        int availableProcessors = (int) connection.getAttribute(objectName, "AvailableProcessors");
        double ratio = (endC - startC) / 1000000.0 / (end - start) / availableProcessors;
        System.out.println("cpu使用率 = " + ratio * 100 + "%"); // label was misspelled "cup使用率"
    } catch (Exception ex) {
        System.out.println(ex);
    } finally {
        if (connector != null) { // connect may have failed before assignment
            try {
                connector.close();
            } catch (Exception ex) {
                System.out.println(ex);
            }
        }
    }
}
======20150723
***1
安全管理器
-Djava.security.manager -Djava.security.policy=bin/res/my.policy
my.policy
grant{
permission java.io.FilePermission "<<ALL FILES>>","read";
};
package com.huawei.test;
import java.io.FilePermission;
/**
 * Demo: inspecting the installed {@link SecurityManager} and exercising a
 * {@link java.io.FilePermission} check. Intended to be run with
 * {@code -Djava.security.manager -Djava.security.policy=...}.
 */
public class JAVASecurityManagerTest {
    public static void main(String[] args) {
        setSecurityManagerForApp();
    }

    /**
     * Prints the current security manager and, if one is installed, checks read
     * permission on {@code d:\}. The original version dereferenced the manager
     * unconditionally and threw NullPointerException when the JVM was started
     * without {@code -Djava.security.manager}; now it reports that case instead.
     */
    public static void setSecurityManagerForApp() {
        SecurityManager safeManager = System.getSecurityManager();
        System.out.println(safeManager);
        if (safeManager == null) {
            // No manager installed - run with -Djava.security.manager to enable checks.
            System.out.println("No SecurityManager installed; permission check skipped");
            return;
        }
        safeManager.checkPermission(new FilePermission("d:\\", "read"));
        System.out.println("hah");
    }
}
http://www.thinksaas.cn/group/topic/298514/
一旦你配置了catalina.policy文件,Tomcat可以使用”-security”选项启动SecurityManager
catalina.policy
$CATALINA_HOME/bin/catalina.sh start -security (Unix)
%CATALINA_HOME%bincatalina start -security (Windows)
***2
java之jvm学习笔记
http://blog.csdn.net/yfqnihao/article/details/8262858
======20150718
***1
http://blog.csdn.net/cskgnt/article/details/8072726
- @Rule
- public TestWatcher watchman = new TestWatcher() {
- protected void starting(Description d) {
- caseIdentifier = d.getClassName() + "." + d.getMethodName();
- System.out.println("starting: " + caseIdentifier);
- }
- protected void succeeded(Description d) {
- caseIdentifier = d.getClassName() + " " + d.getMethodName();
- System.out.println("succeeded: " + caseIdentifier);
- }
- protected void failed(Throwable e, Description d) {
- caseIdentifier = d.getClassName() + " " + d.getMethodName();
- System.out.println("failed: " + caseIdentifier);
- }
- protected void finished(Description d) {
- sonAfter();
- caseIdentifier = d.getClassName() + " " + d.getMethodName();
- System.out.println("finished: " + caseIdentifier);
- }
- };
======20150717
*** 1
Classloader is just a collection of files.
For example
URL url=new URL("file:///F:/shared/");URLClassLoader sharedLoader=new URLClassLoader(new URL[]{url});
Class loadedClass1 = sharedLoader.loadClass("org.apache.catalina.TestForClassloader");
Usually a classloader tries to load a class from its parent first. If you want it to load from its own path first, you need to override loadClass(String name).
*** 2
// Looks up a field on the given class by name.
// Strategy order matters: first getField (public fields, including inherited),
// then getDeclaredField (fields declared directly on cls, any visibility).
// Returns null if neither lookup succeeds.
// NOTE(review): private fields inherited from a superclass are NOT found by
// either call - walk the superclass chain if that is needed.
public static Field getField(Class cls, String fieldName) {
try {
return cls.getField(fieldName);
} catch (Exception e) {
// deliberately ignored - fall through to the declared-field lookup
}
try {
return cls.getDeclaredField(fieldName);
} catch (Exception e) {
// deliberately ignored - field not found, report null below
}
return null;
}
// Makes the named field accessible for reflective access (suppresses Java
// language access checks). Silently does nothing if the field is not found.
public static void setFieldVisible(Class cls, String fieldName) {
Field field=getField(cls,fieldName);
if(field!=null){
field.setAccessible(true);
}
}
// Rewrites a Field's modifier bits by poking the private Field.modifiers slot.
// Examples:
//   modifier = Modifier.PUBLIC | Modifier.STATIC        -> "public static"
//   modifier = field.getModifiers() & ~Modifier.PRIVATE -> removes the PRIVATE bit
// (The original note claimed the second example changes "FINAL" to "" - that
// mask actually clears PRIVATE; use ~Modifier.FINAL to clear final.)
// NOTE(review): relies on the JDK-internal "modifiers" field of
// java.lang.reflect.Field; this is blocked from Java 12 onward - confirm target JDK.
public static void changeFieldModifier(Field field,int modifier){
try {
Field modifiersField = Field.class.getDeclaredField("modifiers");
modifiersField.setAccessible(true);
modifiersField.setInt(field, modifier );
} catch (Exception e) {
e.printStackTrace();
}
}
*** 3
人类社会已经开启了三次产业革命。第一次产业革命,是以蒸汽机为标志。第二次产业革命,是以内燃机和电力发明为标志,
第三次产业革命,是以可再生能源(如核能)和互联网为标志。有研究表明,大数据或作为动力引擎之一,引领人类的第四次产业革命
互联网就是信息技术交换,它的工作就是分享信息,从而提高和帮助信息交换的双方。
信息太大了,所以信息交换无穷无尽。互联网的发展看不到尽头。
上网看电影只是一种娱乐,提供方得到名气和收入,使用方得到娱乐。
如果每人把经验/问题/想法等于其他人共享,将是每人受益。
当前互联网的热点是大数据,在于数据共享,集中分析使用。
比如购物历史,可以分析当前物质的需求和热点。
目前,在医疗领域,面临的一个重大挑战就是如何获取有关癌症病人的大量诊疗数据。
美国临床肿瘤学协会(American Society of Clinical Oncology,ASCO)首席执行官Allen Lichter曾指出,
在超过96%的病例中,病人的详细治疗信息“被锁在医疗档案和文件柜或者存储于未联网的电子系统中”。
但由于涉及到病人的隐私问题、机构间的利益冲突以及纯粹缺乏电子病历,阻碍着医疗领域的信息共享,让每一次癌症治疗,都像发生一个孤立事件。
令人恼火的是,很多医疗机构的诊断数据,要么从一开始就是一堆纸质文件,根本就没有数字化,从而不能更大范围的共享。
要么利用电子病历数字化后,然随后就束之高阁,形成信息孤岛。
各个医疗机构仅在可供自己访问的小数据集合上施以分析,形成最终结论,这如同“盲人摸象”一样,是片面的,甚至是错误的。
英特尔公司提出的“数据咖啡馆”,其核心理念把不同医疗机构的癌症诊疗数据汇聚到一起,形成大数据集合,但不同机构间的数据,“相逢但不相识”,“可用但不可见”。
一旦“数据咖啡馆”项目能成功实施,势必在某种程度上加速癌症研究的技术突破。
目前,信息技术(特别是现在的大数据技术)就如同一个“鲶鱼”,它游进哪个领域,都会带来“创造性破坏”。
在熊彼特看来,每一次大规模的创新,都淘汰旧的技术和生产体系,并建立起新的生产体系。
在沃森(Watson)智能系统中,通过编写数据挖掘分析算法,沃森可以模拟人体和成千上万种药物做病理和药理实验。
细胞突变是造成癌症的主要因素,经过一番“深思熟虑”,根据自己的“博学”医学经验,沃森可以给出抑制突变细胞最有效的药物。
CancerLinQ本身还是一个“快速学习系统”,通过机器学习技术,可从海量医疗数据中发现有价值的模式,进而形成对癌症深度洞察,并加快发现新药的速度。
大数据所需面临的挑战还在于,从我们身边的大千世界中获取的数据,十之七八是凌乱无章的,非结构化数据
Folding@home项目之所以能够成功,究其本质,是因为“众人拾柴火焰高”,它充分整合世界各地的志愿者的闲置计算资源,
来完成以往只能在大规模超级计算机上完成的项目。这是众多大规模分布式计算项目之一,也是最出名、普及最广的“网格计算”项目。
而“网格计算”,在某种意义上,就是现在热炒的“云计算”的妈妈
*** 4
http://www.csdn.net/article/2015-07-15/2825216 (五种基于 MapReduce 的并行计算框架介绍及性能测试)
本次实验的硬件资源基于 x86 服务器 1 台,配置为内存 32GB DDR3、E5 CPU/12 核、GPU,实验数据样本为 10M/50M/100M/500M/1000M 的文本文件五个,
我们使用 Hadoop MapReduce、Spark、Phoenix、Disco、Mars 等 MapReduce 框架分别运行文本分析程序,
基于结果一致的前提下统计出运行时间、运行时 CPU 占有率、运行时内存占有率等数据,并采用这些数据绘制成柱状图。
图 9 实验运行时间比较图是分析不同大小的文本文件所消耗的时间对比图。从上图可以看出,Hadoop MapReduce 的运行时间最长,
原因是 Hadoop 生态环境包含内容过多,所以每次任务启动时首先需要加载所需资源包,然后缓慢地发起任务,
并且由于本身是用性能较差的 Java 语言编写的,所以导致整体计算时间长、性能差。
Phoenix 由于采用汇编和 C 语言编写,内核很小,运行时所用资源很少,所以整个测试过程耗时也较少。
Spark 框架在 WordCount 实验中消耗的时长较 Disco 稍少,但是比 Phoenix、Mars 耗时太多。
耗时最短的两个框架是 Mars 和 Phoenix。需要时长从高到低分别是 Hadoop MapReduce、Disco、Spark、Phoenix、Mars。
图 11 内存使用率比较图是分析任务执行过程中内存使用情况对比。从图中可以看出,Mars 和 Phoenix 这两款框架所使用的内存在文本数据较小时是最少的,
随着文本数据的增大,Apache Spark 随着数据量增大而内存大幅增加,Mars 和 Phoenix 有一定幅度的内存使用增加趋势。
当数据量达到本次测试最大的 1000M 文本时,Spark 框架对内存的消耗是最小的,Hadoop MapReduce 和 Disco 需要占用较多的内存。
图 10-CPU 使用率比较图是分析任务执行过程当中 CPU 使用率情况图。
从上图可以看出,Hadoop MapReduce、Disco 这两个框架需要占用的 CPU 资源在 1000M 文本处理时基本到达最大饱和度 (大于 90%),
Apache Spark 的 CPU 使用率没有完全伴随着文本文件增大而大幅上涨,Phoenix 和 Mars 基本控制在对 CPU 使用率较低的范围内。
从上面的测试结果我们得出,如果用户只需要处理海量的文本文件,不需要考虑存储、二次数据挖掘等,采用 Phoenix 或者 Mars 是最大性价比的选择,
但是由于 Mars 必须在 GPU 上运行,本身 GPU 由于价格因素,导致不太可能在实际应用场景里推广,所以综合来看 Phoenix 是性价比最高的框架。
如果应用程序需要处理的数据量非常大,并且客户希望计算出的数据可以被存储和二次计算或数据挖掘,
那 Hadoop MapReduce 较好,因为整个 Hadoop 生态圈庞大,支持性很好。
Apache Spark 由于架构层面设计不同,所以对于 CPU、内存的使用率一直保持较低状态,它未来可以用于海量数据分析用途。
*** 5
A company has a "block leave" policy: some people take leave, and their replacements are not allowed to contact them about job issues. This rule ensures the company does not depend on any single person, and it also puts pressure on the replacement, forcing them to take over the job as soon as possible.
======20150716
*** 1
http://blog.163.com/sir_876/blog/static/117052232012829105319721/
http://blog.csdn.net/yixiaoping/article/details/9801397
连接内存模式的数据库
jdbc:h2:mem:test
连接server模式的数据库
jdbc:h2:tcp://localhost/~/test
jdbc:h2:tcp://<server>[:<port>]/[<path>]<databaseName>
jdbc:h2:tcp://localhost/~/test
jdbc:h2:tcp://dbserv:8084/~/sample
jdbc:h2:tcp://localhost/mem:test
*** 2
http://blog.csdn.net/chjttony/article/details/17838693
http://blog.csdn.net/ultrani/article/details/8993364
http://regbin.iteye.com/blog/1153615
Use Jmockit to unit test, so easy and powerful.. Put one jar into classpath.
Sample
@Mocked MyObject obj;//用@Mocked标注的对象,不需要赋值,jmockit自动mock
new NonStrictExpectations() {//录制预期模拟行为
{
obj.hello("Zhangsan"); returns("Hello Zhangsan");
//also:result = "Hello Zhangsan";
}
};
new Expectations(DateUtil.class) {
{
DateUtil.getCurrentDateStrByFormatType(anyInt);
result = new Delegate() {
public String getCurrentDateStrByFormatType(int type) {
if (type == 1) {
return "2010/07/22 15:52:55";
} else {
return "2010-07-22 15:52:55";
}
}
};
}
};
new MockUp<StateMocked>() {//使用MockUp修改被测试方法内部逻辑
@Mock
public int getTriple(int i) {
return i * 30;
}
};