phoenix jdbc driver查询源码分析

phoenix driver查询源码分析

首先 phoenix 支持jdbc协议,就要按照jdbc的协议接口规范进行实现。
首先要实现driver接口的 PhoenixDriver,这个driver负责JDBC连接对象的创建,
以及到目标服务器端连接的建立。
在该类中有一个创建链接的方法

protected final Connection createConnection(String url, Properties info) throws SQLException {
  // Work on a deep copy so the caller's Properties are never mutated.
  Properties effectiveProps = PropertiesUtil.deepCopy(info);
  // Driver-wide defaults are applied after the copy, so for duplicate keys
  // the values from getDefaultProps() overwrite caller-supplied ones.
  effectiveProps.putAll(getDefaultProps().asMap());
  // Resolve (or lazily create) the per-cluster query services, then hand out
  // a Phoenix connection bound to them.
  ConnectionQueryServices queryServices = getConnectionQueryServices(url, effectiveProps);
  return queryServices.connect(url, effectiveProps);
}

然后通过得到的连接查询服务对象(ConnectionQueryServices)来创建连接

// NOTE(review): this excerpt is truncated — the try block opened below is
// closed (together with its catch/finally and the method's closing brace)
// outside this snippet.
@Override
protected ConnectionQueryServices getConnectionQueryServices(String url, Properties info) throws SQLException {
    try {
        // Acquire the read lock; may throw if interrupted while waiting.
        lockInterruptibly(LockMode.READ);
        checkClosed();
        // Parse the JDBC url into a ConnectionInfo (per the surrounding
        // article, this extracts the ZooKeeper address from the url).
        ConnectionInfo connInfo = ConnectionInfo.create(url);
        QueryServices services = getQueryServices();
        // Normalize against configured defaults so equivalent urls resolve to
        // the same cache key.
        ConnectionInfo normalizedConnInfo = connInfo.normalize(services.getProps());
        // Query services are cached per normalized connection info.
        ConnectionQueryServices connectionQueryServices = connectionQueryServicesMap.get(normalizedConnInfo);
        if (connectionQueryServices == null) {
            if (normalizedConnInfo.isConnectionless()) {
                // "Connectionless" variant — no live cluster behind it.
                connectionQueryServices = new ConnectionlessQueryServicesImpl(services, normalizedConnInfo, info);
            } else {
                connectionQueryServices = new ConnectionQueryServicesImpl(services, normalizedConnInfo, info);
            }
            // Another thread may have raced us to populate the map; if so,
            // discard ours and use the winner's instance.
            ConnectionQueryServices prevValue = connectionQueryServicesMap.putIfAbsent(normalizedConnInfo, connectionQueryServices);
            if (prevValue != null) {
                connectionQueryServices = prevValue;
            }
        }

在 ConnectionInfo connInfo = ConnectionInfo.create(url); 这个方法当中,解析url中的zk地址,
然后创建连接查询服务对象

connectionQueryServices = new ConnectionQueryServicesImpl(services, normalizedConnInfo, info);

在 ConnectionQueryServicesImpl 开始进行 init(final String url, final Properties props) 初始化
首先打开链接

// Opens the underlying connection to HBase, performing a Kerberos login
// first when both a client keytab and principal are configured.
// Throws SQLException with CANNOT_ESTABLISH_CONNECTION (root cause attached)
// if the connection cannot be created, or if it comes back already closed.
private void openConnection() throws SQLException {
    try {
        // check if we need to authenticate with kerberos
        String clientKeytab = this.getProps().get(HBASE_CLIENT_KEYTAB);
        String clientPrincipal = this.getProps().get(HBASE_CLIENT_PRINCIPAL);
        if (clientKeytab != null && clientPrincipal != null) {
            logger.info("Trying to connect to a secure cluster with keytab:" + clientKeytab);
            // Point UGI at our HBase configuration before logging in.
            UserGroupInformation.setConfiguration(config);
            User.login(config, HBASE_CLIENT_KEYTAB, HBASE_CLIENT_PRINCIPAL, null);
            // Fixed log-message typo: "Successfull" -> "Successful".
            logger.info("Successful login to secure cluster!!");
        }
        boolean transactionsEnabled = props.getBoolean(
                QueryServices.TRANSACTIONS_ENABLED,
                QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED);
        // only initialize the tx service client if needed
        if (transactionsEnabled) {
            initTxServiceClient();
        }
        // The actual connection to HBase.
        this.connection = HBaseFactoryProvider.getHConnectionFactory().createConnection(this.config);
    } catch (IOException e) {
        // Preserve the root cause so callers can see why the connect failed.
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION)
            .setRootCause(e).build().buildException();
    }
    if (this.connection.isClosed()) { // TODO: why the heck doesn't this throw above?
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.CANNOT_ESTABLISH_CONNECTION).build().buildException();
    }
}

可以看到,打开的connection 是 org.apache.hadoop.hbase.client.HConnection,也就是创建了到hbase的连接。
接着包装了一下连接对象

 metaConnection = new PhoenixConnection(
                                ConnectionQueryServicesImpl.this, globalUrl, scnProps, newEmptyMetaData());

可以看下该对象的初始化,phoenix里面的很多特性都是在这里可以看到缩影

// Constructs a Phoenix JDBC connection on top of the shared
// ConnectionQueryServices, layering per-connection settings (tenant id, SCN,
// autocommit, consistency level, date/number format patterns) taken from the
// url and the supplied Properties. Registers itself with the services and
// bumps the global open-connection counter.
// NOTE(review): behavior matches the original except for the metadata-pruning
// fix flagged below.
public PhoenixConnection(ConnectionQueryServices services, String url, Properties info, PMetaData metaData, MutationState mutationState, boolean isDescVarLengthRowKeyUpgrade) throws SQLException {
    this.url = url;
    this.isDescVarLengthRowKeyUpgrade = isDescVarLengthRowKeyUpgrade;
    // Copy so client cannot change
    this.info = info == null ? new Properties() : PropertiesUtil.deepCopy(info);
    final PName tenantId = JDBCUtil.getTenantId(url, info);
    if (this.info.isEmpty() && tenantId == null) {
        // Nothing connection-specific to apply: share the services as-is.
        this.services = services;
    } else {
        // Create child services keyed by tenantId to track resource usage for
        // a tenantId for all connections on this JVM.
        if (tenantId != null) {
            services = services.getChildQueryServices(tenantId.getBytesPtr());
        }
        ReadOnlyProps currentProps = services.getProps();
        final ReadOnlyProps augmentedProps = currentProps.addAll(filterKnownNonProperties(this.info));
        // Only wrap in a delegate when this connection actually overrides
        // properties; otherwise reuse the (possibly tenant-scoped) services.
        this.services = augmentedProps == currentProps ? services : new DelegateConnectionQueryServices(services) {
            @Override
            public ReadOnlyProps getProps() {
                return augmentedProps;
            }
        };
    }

    // SCN, when present, pins this connection's reads/writes to a fixed
    // HBase timestamp (see prune() below).
    Long scnParam = JDBCUtil.getCurrentSCN(url, this.info);
    checkScn(scnParam);
    this.scn = scnParam;
    // Auto-flush is only honored when transactions are enabled.
    this.isAutoFlush = this.services.getProps().getBoolean(QueryServices.TRANSACTIONS_ENABLED, QueryServicesOptions.DEFAULT_TRANSACTIONS_ENABLED)
            && this.services.getProps().getBoolean(QueryServices.AUTO_FLUSH_ATTRIB, QueryServicesOptions.DEFAULT_AUTO_FLUSH) ;
    this.isAutoCommit = JDBCUtil.getAutoCommit(
            url, this.info,
            this.services.getProps().getBoolean(
                    QueryServices.AUTO_COMMIT_ATTRIB,
                    QueryServicesOptions.DEFAULT_AUTO_COMMIT));
    this.consistency = JDBCUtil.getConsistencyLevel(url, this.info, this.services.getProps()
             .get(QueryServices.CONSISTENCY_ATTRIB,
                     QueryServicesOptions.DEFAULT_CONSISTENCY_LEVEL));
    this.tenantId = tenantId;
    this.mutateBatchSize = JDBCUtil.getMutateBatchSize(url, this.info, this.services.getProps());
    // Format patterns for temporal/numeric rendering, overridable via props.
    datePattern = this.services.getProps().get(QueryServices.DATE_FORMAT_ATTRIB, DateUtil.DEFAULT_DATE_FORMAT);
    timePattern = this.services.getProps().get(QueryServices.TIME_FORMAT_ATTRIB, DateUtil.DEFAULT_TIME_FORMAT);
    timestampPattern = this.services.getProps().get(QueryServices.TIMESTAMP_FORMAT_ATTRIB, DateUtil.DEFAULT_TIMESTAMP_FORMAT);
    String numberPattern = this.services.getProps().get(QueryServices.NUMBER_FORMAT_ATTRIB, NumberUtil.DEFAULT_NUMBER_FORMAT);
    int maxSize = this.services.getProps().getInt(QueryServices.MAX_MUTATION_SIZE_ATTRIB,QueryServicesOptions.DEFAULT_MAX_MUTATION_SIZE);
    Format dateFormat = DateUtil.getDateFormatter(datePattern);
    Format timeFormat = DateUtil.getDateFormatter(timePattern);
    Format timestampFormat = DateUtil.getDateFormatter(timestampPattern);
    // Signed and unsigned temporal types share the same formatter instances.
    formatters.put(PDate.INSTANCE, dateFormat);
    formatters.put(PTime.INSTANCE, timeFormat);
    formatters.put(PTimestamp.INSTANCE, timestampFormat);
    formatters.put(PUnsignedDate.INSTANCE, dateFormat);
    formatters.put(PUnsignedTime.INSTANCE, timeFormat);
    formatters.put(PUnsignedTimestamp.INSTANCE, timestampFormat);
    formatters.put(PDecimal.INSTANCE, FunctionArgumentType.NUMERIC.getFormatter(numberPattern));
    // We do not limit the metaData on a connection less than the global one,
    // as there's not much that will be cached here.
    Pruner pruner = new Pruner() {

        // Drop non-SYSTEM tables that are newer than the SCN cutoff or that
        // belong to a different tenant.
        @Override
        public boolean prune(PTable table) {
            long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
            return (table.getType() != PTableType.SYSTEM && 
                    (  table.getTimeStamp() >= maxTimestamp || 
                     ! Objects.equal(tenantId, table.getTenantId())) );
        }

        // Same rule for functions (no SYSTEM exemption here).
        @Override
        public boolean prune(PFunction function) {
            long maxTimestamp = scn == null ? HConstants.LATEST_TIMESTAMP : scn;
            return ( function.getTimeStamp() >= maxTimestamp ||
                     ! Objects.equal(tenantId, function.getTenantId()));
        }
    };
    this.isRequestLevelMetricsEnabled = JDBCUtil.isCollectingRequestLevelMetricsEnabled(url, info, this.services.getProps());
    this.mutationState = mutationState == null ? newMutationState(maxSize) : new MutationState(mutationState);
    // BUGFIX: chain the function prune off the already table-pruned metadata.
    // The original second assignment pruned functions from the unpruned
    // 'metaData' parameter, silently discarding the result of pruneTables().
    this.metaData = metaData.pruneTables(pruner);
    this.metaData = this.metaData.pruneFunctions(pruner);
    this.services.addConnection(this);

    // setup tracing, if its enabled
    this.sampler = Tracing.getConfiguredSampler(this);
    this.customTracingAnnotations = getImmutableCustomTracingAnnotations();
    this.scannerQueue = new LinkedBlockingQueue<>();
    this.tableResultIteratorFactory = new DefaultTableResultIteratorFactory();
    GLOBAL_OPEN_PHOENIX_CONNECTIONS.increment();
}

在上面的代码中可以看到 tenantId (租户),这涉及到不同使用者之间的资源分配问题。
scnParam 属性,涉及到读写时所使用的hbase时间戳的问题。
consistency 属性,涉及到数据的一致性问题。
所以其实phoenix对hbase的查询就是在hbase的connection上面包装了一层代理,增加自己的特性。
当链接创建成功后,就要开始初始化phoenix的元数据表。在类 ConnectionQueryServicesImpl.init 的方法中
进行相关的元数据表的初始化,判断相关的元数据表是否已经创建,如果没有创建就进行创建

  metaConnection.createStatement().executeUpdate(QueryConstants.CREATE_TABLE_METADATA);
其中 CREATE_TABLE_METADATA 以 "CREATE TABLE " + SYSTEM_CATALOG_SCHEMA + ".\"" + SYSTEM_CATALOG_TABLE + ... 开头(定义较长,此处省略)

上面就是对system.catalog的目录表的创建。
首先创建 statement,就是下面的对象

 PhoenixStatement statement = new PhoenixStatement(this);   
      @Override
// Executes a mutating statement (UPSERT/DELETE/DDL) and returns its update
// count. Rejects non-mutation SQL and refuses to run while a batch is pending,
// matching the JDBC executeUpdate contract.
public int executeUpdate(String sql) throws SQLException {
    // Parse up front so non-mutations (e.g. SELECT) are rejected before any
    // execution state is touched.
    CompilableStatement compiled = parseStatement(sql);
    if (!compiled.getOperation().isMutation) {
        throw new ExecuteUpdateNotApplicableException(sql);
    }
    // Mixing executeUpdate with a pending batch is a usage error.
    if (!batch.isEmpty()) {
        throw new SQLExceptionInfo.Builder(SQLExceptionCode.EXECUTE_UPDATE_WITH_NON_EMPTY_BATCH).build().buildException();
    }
    int rowsAffected = executeMutation(compiled);
    // Flush (if configured) only after a successful mutation.
    flushIfNecessary();
    return rowsAffected;
}

然后进行sql的编译

  parser = new PhoenixStatementParser(sql, new ExecutableNodeFactory());    

创建了上面的对象对sql进行解析,接着到 executeMutation方法中执行

 // NOTE(review): this excerpt is truncated — call()'s tail (the rest of the
 // try, its catch/finally, and the closing braces) lies outside this snippet.
 new CallRunner.CallableThrowable<Integer, SQLException>() {
                    @Override
                        public Integer call() throws SQLException {
                        try {
                            MutationState state = connection.getMutationState();
                            // Compile the parsed statement into an execution plan here.
                            MutationPlan plan = stmt.compilePlan(PhoenixStatement.this, Sequence.ValueOp.VALIDATE_SEQUENCE);
                            // Start a transaction first when the target table is transactional.
                            if (plan.getTargetRef() != null && plan.getTargetRef().getTable() != null && plan.getTargetRef().getTable().isTransactional()) {
                                state.startTransaction();
                            }
                            // Presumably pushes pending uncommitted mutations for the plan's
                            // source tables before executing — per the method name; confirm.
                            Iterator<TableRef> tableRefs = plan.getSourceRefs().iterator();
                            state.sendUncommitted(tableRefs);
                            state.checkpointIfNeccessary(plan);
                            MutationState lastState = plan.execute();
                            // Merge the plan's resulting mutation state into the connection's.
                            state.join(lastState);
                            if (connection.getAutoCommit()) {
                                connection.commit();; // NOTE(review): stray extra ';' (harmless empty statement)
                            }
                            setLastResultSet(null);
                            setLastQueryPlan(null);
                            // Unfortunately, JDBC uses an int for update count, so we
                            // just max out at Integer.MAX_VALUE
                            int lastUpdateCount = (int) Math.min(Integer.MAX_VALUE, lastState.getUpdateCount());
  • 2
    点赞
  • 2
    收藏
    觉得还不错? 一键收藏
  • 0
    评论

“相关推荐”对你有帮助么?

  • 非常没帮助
  • 没帮助
  • 一般
  • 有帮助
  • 非常有帮助
提交
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值