Hive alter table

Continued from http://bupt04406.iteye.com/blog/1151545

create table tablePartition(s string) partitioned by(pt string);
alter table tablePartition add if not exists partition(pt='1');
alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');  -- convert a managed (internal) table into an external table
alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='FALSE'); -- convert an external table back into a managed table

hive> create table tablePartition(s string) partitioned by(pt string);
OK
Time taken: 0.209 seconds
hive> desc formatted tablepartition;
OK
# col_name data_type comment

s string None

# Partition Information
# col_name data_type comment

pt string None

# Detailed Table Information
Database: default
Owner: root
CreateTime: Mon Aug 29 19:05:22 PDT 2011
LastAccessTime: UNKNOWN
Retention: 0
Location: hdfs://localhost:54310/user/hive/warehouse/tablepartition
Table Type: MANAGED_TABLE
Table Parameters:
transient_lastDdlTime 1314669922

# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1


hive> alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
OK
Time taken: 0.116 seconds
hive> desc formatted tablepartition;
OK
# col_name data_type comment

s string None

# Partition Information
# col_name data_type comment

pt string None

# Detailed Table Information
Database: default
Owner: root
CreateTime: Mon Aug 29 19:05:22 PDT 2011
LastAccessTime: UNKNOWN
Retention: 0
Location: hdfs://localhost:54310/user/hive/warehouse/tablepartition
Table Type: EXTERNAL_TABLE
Table Parameters:
EXTERNAL TRUE
last_modified_by tianzhao
last_modified_time 1314670000
transient_lastDdlTime 1314670000

# Storage Information
SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
InputFormat: org.apache.hadoop.mapred.TextInputFormat
OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
Compressed: No
Num Buckets: -1
Bucket Columns: []
Sort Columns: []
Storage Desc Params:
serialization.format 1
Time taken: 0.089 seconds




CliDriver:
CliDriver.main() {
ret = cli.processLine(line); //alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
}
CliDriver:
public int processLine(String line) { // line = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE');
ret = processCmd(command);
}

CliDriver:
public int processCmd(String cmd) { // cmd = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE')
CommandProcessor proc = CommandProcessorFactory.get(tokens[0]);
Driver qp = (Driver) proc;
ret = qp.run(cmd).getResponseCode();
}

CommandProcessor proc = CommandProcessorFactory.get(tokens[0]); // tokens[0] = alter
For "alter" the factory returns a new Driver() to handle the statement.
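To make that dispatch concrete, here is a minimal standalone sketch (not the actual Hive source; class and method names are my own): statements whose first token is not one of the special CLI commands fall through to the SQL Driver, which is why "alter" ends up in Driver.run().

import java.util.Arrays;
import java.util.List;

public class CommandDispatchSketch {
    // Simplified stand-in for CommandProcessorFactory.get(): a few first tokens
    // map to special processors, everything else goes to Driver.
    private static final List<String> SPECIAL = Arrays.asList("set", "dfs", "add", "delete");

    static String processorFor(String command) {
        String firstToken = command.trim().split("\\s+")[0].toLowerCase();
        return SPECIAL.contains(firstToken) ? "special CommandProcessor" : "Driver";
    }

    public static void main(String[] args) {
        System.out.println(processorFor(
            "alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE')")); // Driver
    }
}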

Driver:
CommandProcessorResponse run(String command) {
// command = alter table tablePartition set TBLPROPERTIES ('EXTERNAL'='TRUE')
int ret = compile(command);
ret = execute();
}

Driver:
public int compile(String command) {
  // for an ALTER TABLE statement the factory picks the DDL analyzer:
  SemanticAnalyzerFactory.get(ASTNode tree) {
    return new DDLSemanticAnalyzer(conf);
  }
}

DDLSemanticAnalyzer:
public void analyzeInternal(ASTNode ast) throws SemanticException {
} else if (ast.getToken().getType() == HiveParser.TOK_ALTERTABLE_PROPERTIES) {
analyzeAlterTableProps(ast, false);
}
}

DDLSemanticAnalyzer:
private void analyzeAlterTableProps(ASTNode ast, boolean expectView)
throws SemanticException {
String tableName = unescapeIdentifier(ast.getChild(0).getText()); // tablePartition
HashMap<String, String> mapProp = getProps((ASTNode) (ast.getChild(1))
.getChild(0)); // {EXTERNAL=TRUE}
AlterTableDesc alterTblDesc =
new AlterTableDesc(AlterTableTypes.ADDPROPS, expectView);
alterTblDesc.setProps(mapProp); // {EXTERNAL=TRUE}
alterTblDesc.setOldName(tableName); // tablePartition

accessTbName = tableName; //tablePartition
privilege = Privilege.ALTER_PRIV;

rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
alterTblDesc), conf)); // build a DDLWork; since alterTblDesc is non-null, TaskFactory creates a DDLTask
}
DDLSemanticAnalyzer:
static HashMap<String, String> getProps(ASTNode prop) {
HashMap<String, String> mapProp = new HashMap<String, String>();
readProps(prop, mapProp);
return mapProp; // {EXTERNAL=TRUE}
}
BaseSemanticAnalyzer:
public static void readProps(
ASTNode prop, Map<String, String> mapProp) {

for (int propChild = 0; propChild < prop.getChildCount(); propChild++) {
String key = unescapeSQLString(prop.getChild(propChild).getChild(0)
.getText()); // EXTERNAL
String value = unescapeSQLString(prop.getChild(propChild).getChild(1)
.getText()); // TRUE
mapProp.put(key, value); // {EXTERNAL=TRUE}
}
}
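A tiny standalone illustration of what getProps/readProps produce for this statement; the helper below is a simplified stand-in for unescapeSQLString, which (among other things) strips the quotes around each literal.

import java.util.HashMap;
import java.util.Map;

public class ReadPropsSketch {
    // simplified stand-in for BaseSemanticAnalyzer.unescapeSQLString:
    // drop the surrounding single quotes of a SQL string literal
    static String unquote(String literal) {
        return literal.substring(1, literal.length() - 1);
    }

    public static void main(String[] args) {
        // TBLPROPERTIES ('EXTERNAL'='TRUE') arrives as quoted key/value literals
        Map<String, String> mapProp = new HashMap<String, String>();
        mapProp.put(unquote("'EXTERNAL'"), unquote("'TRUE'"));
        System.out.println(mapProp); // {EXTERNAL=TRUE}
    }
}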

public AlterTableDesc(AlterTableTypes alterType, boolean expectView) {
op = alterType; // ADDPROPS
this.expectView = expectView; // false
}


Execution path (from Driver.execute() down to the metastore):
Driver.launchTask
TaskRunner.runSequential
Task.executeTask
DDLTask.execute
  AlterTableDesc alterTbl = work.getAlterTblDesc();
  if (alterTbl != null) {
    return alterTable(db, alterTbl);
  }
DDLTask.alterTable(Hive db, AlterTableDesc alterTbl)
Hive.alterTable
HiveMetaStoreClient.alter_table
HiveMetaStore.alter_table
HiveAlterHandler.alterTable
ObjectStore


private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException {
// alter the table
Table tbl = db.getTable(alterTbl.getOldName()); // fetch the existing tablePartition metadata from the metastore

Partition part = null;
if(alterTbl.getPartSpec() != null) {
part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
if(part == null) {
console.printError("Partition : " + alterTbl.getPartSpec().toString()
+ " does not exist.");
return 1;
}
}

validateAlterTableType(tbl, alterTbl.getOp()); // op = ADDPROPS

if (tbl.isView()) {
if (!alterTbl.getExpectView()) {
throw new HiveException("Cannot alter a view with ALTER TABLE");
}
} else {
if (alterTbl.getExpectView()) {
throw new HiveException("Cannot alter a base table with ALTER VIEW");
}
}

Table oldTbl = tbl.copy(); // keep a copy of the original table

if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAME) {
tbl.setTableName(alterTbl.getNewName());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCOLS) {
List<FieldSchema> newCols = alterTbl.getNewCols();
List<FieldSchema> oldCols = tbl.getCols();
if (tbl.getSerializationLib().equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
tbl.setSerializationLib(LazySimpleSerDe.class.getName());
tbl.getTTable().getSd().setCols(newCols);
} else {
// make sure the columns does not already exist
Iterator<FieldSchema> iterNewCols = newCols.iterator();
while (iterNewCols.hasNext()) {
FieldSchema newCol = iterNewCols.next();
String newColName = newCol.getName();
Iterator<FieldSchema> iterOldCols = oldCols.iterator();
while (iterOldCols.hasNext()) {
String oldColName = iterOldCols.next().getName();
if (oldColName.equalsIgnoreCase(newColName)) {
console.printError("Column '" + newColName + "' exists");
return 1;
}
}
oldCols.add(newCol);
}
tbl.getTTable().getSd().setCols(oldCols);
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.RENAMECOLUMN) {
List<FieldSchema> oldCols = tbl.getCols();
List<FieldSchema> newCols = new ArrayList<FieldSchema>();
Iterator<FieldSchema> iterOldCols = oldCols.iterator();
String oldName = alterTbl.getOldColName();
String newName = alterTbl.getNewColName();
String type = alterTbl.getNewColType();
String comment = alterTbl.getNewColComment();
boolean first = alterTbl.getFirst();
String afterCol = alterTbl.getAfterCol();
FieldSchema column = null;

boolean found = false;
int position = -1;
if (first) {
position = 0;
}

int i = 1;
while (iterOldCols.hasNext()) {
FieldSchema col = iterOldCols.next();
String oldColName = col.getName();
if (oldColName.equalsIgnoreCase(newName)
&& !oldColName.equalsIgnoreCase(oldName)) {
console.printError("Column '" + newName + "' exists");
return 1;
} else if (oldColName.equalsIgnoreCase(oldName)) {
col.setName(newName);
if (type != null && !type.trim().equals("")) {
col.setType(type);
}
if (comment != null) {
col.setComment(comment);
}
found = true;
if (first || (afterCol != null && !afterCol.trim().equals(""))) {
column = col;
continue;
}
}

if (afterCol != null && !afterCol.trim().equals("")
&& oldColName.equalsIgnoreCase(afterCol)) {
position = i;
}

i++;
newCols.add(col);
}

// did not find the column
if (!found) {
console.printError("Column '" + oldName + "' does not exist");
return 1;
}
// after column is not null, but we did not find it.
if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
console.printError("Column '" + afterCol + "' does not exist");
return 1;
}

if (position >= 0) {
newCols.add(position, column);
}

tbl.getTTable().getSd().setCols(newCols);
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.REPLACECOLS) {
// change SerDe to LazySimpleSerDe if it is columnsetSerDe
if (tbl.getSerializationLib().equals(
"org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) {
console
.printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe");
tbl.setSerializationLib(LazySimpleSerDe.class.getName());
} else if (!tbl.getSerializationLib().equals(
MetadataTypedColumnsetSerDe.class.getName())
&& !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
&& !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
&& !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
console.printError("Replace columns is not supported for this table. "
+ "SerDe may be incompatible.");
return 1;
}
tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDPROPS) { // the branch taken for SET TBLPROPERTIES
tbl.getTTable().getParameters().putAll(alterTbl.getProps());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDEPROPS) {
tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
alterTbl.getProps());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDSERDE) {
tbl.setSerializationLib(alterTbl.getSerdeName());
if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) {
tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll(
alterTbl.getProps());
}
tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl
.getDeserializer()));
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) {
if(part != null) {
part.getTPartition().getSd().setInputFormat(alterTbl.getInputFormat());
part.getTPartition().getSd().setOutputFormat(alterTbl.getOutputFormat());
if (alterTbl.getSerdeName() != null) {
part.getTPartition().getSd().getSerdeInfo().setSerializationLib(
alterTbl.getSerdeName());
}
} else {
tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat());
tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat());
if (alterTbl.getSerdeName() != null) {
tbl.setSerializationLib(alterTbl.getSerdeName());
}
}
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDCLUSTERSORTCOLUMN) {
// validate sort columns and bucket columns
List<String> columns = Utilities.getColumnNamesFromFieldSchema(tbl
.getCols());
Utilities.validateColumnNames(columns, alterTbl.getBucketColumns());
if (alterTbl.getSortColumns() != null) {
Utilities.validateColumnNames(columns, Utilities
.getColumnNamesFromSortCols(alterTbl.getSortColumns()));
}
tbl.getTTable().getSd().setBucketCols(alterTbl.getBucketColumns());
tbl.getTTable().getSd().setNumBuckets(alterTbl.getNumberBuckets());
tbl.getTTable().getSd().setSortCols(alterTbl.getSortColumns());
} else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ALTERLOCATION) {
String newLocation = alterTbl.getNewLocation();
try {
URI locURI = new URI(newLocation);
if (!locURI.isAbsolute() || locURI.getScheme() == null
|| locURI.getScheme().trim().equals("")) {
throw new HiveException(
newLocation
+ " is not absolute or has no scheme information. "
+ "Please specify a complete absolute uri with scheme information.");
}
if (part != null) {
part.setLocation(newLocation);
} else {
tbl.setDataLocation(locURI);
}
} catch (URISyntaxException e) {
throw new HiveException(e);
}
} else {
console.printError("Unsupported Alter commnad");
return 1;
}

// set last modified by properties
String user = null;
try {
user = conf.getUser();
} catch (IOException e) {
console.printError("Unable to get current user: " + e.getMessage(),
stringifyException(e));
return 1;
}

if(part == null) {
tbl.setProperty("last_modified_by", user);
tbl.setProperty("last_modified_time", Long.toString(System
.currentTimeMillis() / 1000));
try {
tbl.checkValidity();
} catch (HiveException e) {
console.printError("Invalid table columns : " + e.getMessage(),
stringifyException(e));
return 1;
}
} else {
part.getParameters().put("last_modified_by", user);
part.getParameters().put("last_modified_time", Long.toString(System
.currentTimeMillis() / 1000));
}

try {
if (part == null) {
db.alterTable(alterTbl.getOldName(), tbl); // push the modified table to the metastore
} else {
db.alterPartition(tbl.getTableName(), part);
}
} catch (InvalidOperationException e) {
console.printError("Invalid alter operation: " + e.getMessage());
LOG.info("alter table: " + stringifyException(e));
return 1;
} catch (HiveException e) {
return 1;
}

// This is kind of hacky - the read entity contains the old table, whereas
// the write entity
// contains the new table. This is needed for rename - both the old and the
// new table names are
// passed
if(part != null) {
work.getInputs().add(new ReadEntity(part));
work.getOutputs().add(new WriteEntity(part));
} else {
work.getInputs().add(new ReadEntity(oldTbl));
work.getOutputs().add(new WriteEntity(tbl));
}
return 0;
}
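Stripped of the Hive classes, the ADDPROPS branch plus the last_modified bookkeeping above amount to a plain map merge. A minimal standalone sketch (values taken from the example session):

import java.util.HashMap;
import java.util.Map;

public class AddPropsSketch {
    public static void main(String[] args) {
        Map<String, String> tableParams = new HashMap<String, String>();
        tableParams.put("transient_lastDdlTime", "1314669922"); // existing parameter

        Map<String, String> newProps = new HashMap<String, String>();
        newProps.put("EXTERNAL", "TRUE"); // from ALTER TABLE ... SET TBLPROPERTIES

        tableParams.putAll(newProps); // ADDPROPS: merge, existing keys are overwritten
        tableParams.put("last_modified_by", System.getProperty("user.name"));
        tableParams.put("last_modified_time", Long.toString(System.currentTimeMillis() / 1000));

        System.out.println(tableParams);
    }
}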

Hive:
public void alterTable(String tblName, Table newTbl)
throws InvalidOperationException, HiveException {
try {
getMSC().alter_table(getCurrentDatabase(), tblName,
newTbl.getTTable());
} catch (MetaException e) {
throw new HiveException("Unable to alter table.", e);
} catch (TException e) {
throw new HiveException("Unable to alter table.", e);
}
}
HiveMetaStoreClient:
public void alter_table(String dbname, String tbl_name, Table new_tbl)
throws InvalidOperationException, MetaException, TException {
client.alter_table(dbname, tbl_name, new_tbl);
}
HiveMetaStore.HMSHandler:
public void alter_table(final String dbname, final String name, final Table newTable)
throws InvalidOperationException, MetaException {
incrementCounter("alter_table");
logStartFunction("alter_table: db=" + dbname + " tbl=" + name
+ " newtbl=" + newTable.getTableName()); // 11/08/28 07:02:02 INFO metastore.HiveMetaStore: 0: alter_table: db=default tbl=tablePartition newtbl=tablepartition
newTable.putToParameters(Constants.DDL_TIME, Long.toString(System
.currentTimeMillis() / 1000));

try {
executeWithRetry(new Command<Boolean>() {
@Override
Boolean run(RawStore ms) throws Exception {
alterHandler.alterTable(ms, wh, dbname, name, newTable); //AlterHandler
return Boolean.TRUE;
}
});
} catch (MetaException e) {
throw e;
} catch (InvalidOperationException e) {
throw e;
} catch (Exception e) {
assert(e instanceof RuntimeException);
throw (RuntimeException)e;
}

}
HiveMetaStore.HMSHandler:
private <T> T executeWithRetry(Command<T> cmd) throws Exception {
T ret = null;

boolean gotNewConnectUrl = false;
boolean reloadConf = HiveConf.getBoolVar(hiveConf,
HiveConf.ConfVars.METASTOREFORCERELOADCONF);

if (reloadConf) {
updateConnectionURL(getConf(), null);
}

int retryCount = 0;
Exception caughtException = null;
while(true) {
try {
RawStore ms = getMS(reloadConf || gotNewConnectUrl); // org.apache.hadoop.hive.metastore.ObjectStore
ret = cmd.run(ms);
break;
} catch (javax.jdo.JDOFatalDataStoreException e) {
caughtException = e;
} catch (javax.jdo.JDODataStoreException e) {
caughtException = e;
}

if (retryCount >= retryLimit) {
throw caughtException;
}

assert(retryInterval >= 0);
retryCount++;
LOG.error(
String.format(
"JDO datastore error. Retrying metastore command " +
"after %d ms (attempt %d of %d)", retryInterval, retryCount, retryLimit));
Thread.sleep(retryInterval);
// If we have a connection error, the JDO connection URL hook might
// provide us with a new URL to access the datastore.
String lastUrl = getConnectionURL(getConf());
gotNewConnectUrl = updateConnectionURL(getConf(), lastUrl);
}
return ret;
}
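The retry wrapper above follows a generic pattern; a self-contained sketch of the same shape (the real code only retries JDO datastore exceptions and may also refresh the connection URL between attempts):

public class RetrySketch {
    interface Command<T> {
        T run() throws Exception;
    }

    static <T> T executeWithRetry(Command<T> cmd, int retryLimit, long retryInterval)
            throws Exception {
        int retryCount = 0;
        Exception caught = null;
        while (true) {
            try {
                return cmd.run();
            } catch (Exception e) { // Hive only catches JDO datastore exceptions here
                caught = e;
            }
            if (retryCount >= retryLimit) {
                throw caught;
            }
            retryCount++;
            Thread.sleep(retryInterval); // wait before the next attempt
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(executeWithRetry(() -> "ok", 3, 100L));
    }
}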

HiveAlterHandler:
public void alterTable(RawStore msdb, Warehouse wh, String dbname,
String name, Table newt) throws InvalidOperationException, MetaException { // msdb=org.apache.hadoop.hive.metastore.ObjectStore
if (newt == null) {
throw new InvalidOperationException("New table is invalid: " + newt);
}

if (!MetaStoreUtils.validateName(newt.getTableName())
|| !MetaStoreUtils.validateColNames(newt.getSd().getCols())) {
throw new InvalidOperationException(newt.getTableName()
+ " is not a valid object name");
}

Path srcPath = null;
FileSystem srcFs = null;
Path destPath = null;
FileSystem destFs = null;

boolean success = false;
String oldTblLoc = null;
String newTblLoc = null;
boolean moveData = false;
boolean rename = false;
try {
msdb.openTransaction();
name = name.toLowerCase(); // tablepartition
dbname = dbname.toLowerCase(); // default

// check if table with the new name already exists
if (!newt.getTableName().equalsIgnoreCase(name)
|| !newt.getDbName().equalsIgnoreCase(dbname)) {
if (msdb.getTable(newt.getDbName(), newt.getTableName()) != null) {
throw new InvalidOperationException("new table " + newt.getDbName()
+ "." + newt.getTableName() + " already exists");
}
rename = true;
}

// get old table
Table oldt = msdb.getTable(dbname, name); // Table(tableName:tablepartition, dbName:default, owner:root, createTime:1314540180, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:s, type:string, comment:null)], location:hdfs://localhost:54310/user/hive/warehouse/tablepartition, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pt, type:string, comment:null)], parameters:{transient_lastDdlTime=1314540180}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)

if (oldt == null) {
throw new InvalidOperationException("table " + newt.getDbName() + "."
+ newt.getTableName() + " doesn't exist");
}

// check that partition keys have not changed
if (oldt.getPartitionKeys().size() != newt.getPartitionKeys().size()
|| !oldt.getPartitionKeys().containsAll(newt.getPartitionKeys())) {
throw new InvalidOperationException(
"partition keys can not be changed.");
}

// if this alter is a rename, and user didn't change the
// default location (or new location is empty), and table is
// not an external table, that means user is asking metastore
// to move data to new location corresponding to the new name
if (rename
&& (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
|| StringUtils.isEmpty(newt.getSd().getLocation()))
&& !MetaStoreUtils.isExternalTable(oldt)) { // rename=false
// that means user is asking metastore to move data to new location
// corresponding to the new name
// get new location
newTblLoc = wh.getDefaultTablePath(newt.getDbName(),
newt.getTableName()).toString();
newt.getSd().setLocation(newTblLoc);
oldTblLoc = oldt.getSd().getLocation();
moveData = true;
// check that destination does not exist otherwise we will be
// overwriting data
srcPath = new Path(oldTblLoc);
srcFs = wh.getFs(srcPath);
destPath = new Path(newTblLoc);
destFs = wh.getFs(destPath);
// check that src and dest are on the same file system
if (srcFs != destFs) {
throw new InvalidOperationException("table new location " + destPath
+ " is on a different file system than the old location "
+ srcPath + ". This operation is not supported");
}
try {
srcFs.exists(srcPath); // check that src exists and also checks
// permissions necessary
if (destFs.exists(destPath)) {
throw new InvalidOperationException("New location for this table "
+ newt.getDbName() + "." + newt.getTableName()
+ " already exists : " + destPath);
}
} catch (IOException e) {
Warehouse.closeFs(srcFs);
Warehouse.closeFs(destFs);
throw new InvalidOperationException("Unable to access new location "
+ destPath + " for table " + newt.getDbName() + "."
+ newt.getTableName());
}
// also the location field in partition
List<Partition> parts = msdb.getPartitions(dbname, name, 0);
for (Partition part : parts) {
String oldPartLoc = part.getSd().getLocation();
String oldTblLocPath = new Path(oldTblLoc).toUri().getPath();
String newTblLocPath = new Path(newTblLoc).toUri().getPath();
if (oldPartLoc.contains(oldTblLocPath)) {
URI newPartLocUri = null;
try {
URI oldPartLocUri = new URI(oldPartLoc);
newPartLocUri = new URI(
oldPartLocUri.getScheme(),
oldPartLocUri.getUserInfo(),
oldPartLocUri.getHost(),
oldPartLocUri.getPort(),
oldPartLocUri.getPath().replace(oldTblLocPath, newTblLocPath),
oldPartLocUri.getQuery(),
oldPartLocUri.getFragment());
} catch (URISyntaxException e) {
throw new InvalidOperationException("Old partition location " +
" is invalid. (" + oldPartLoc + ")");
}
part.getSd().setLocation(newPartLocUri.toString());
msdb.alterPartition(dbname, name, part);
}
}
}
// now finally call alter table
msdb.alterTable(dbname, name, newt);
//newt=Table(tableName:tablepartition, dbName:default, owner:root, createTime:1314540180, lastAccessTime:0, retention:0, sd:StorageDescriptor(cols:[FieldSchema(name:s, type:string, comment:null)], location:hdfs://localhost:54310/user/hive/warehouse/tablepartition, inputFormat:org.apache.hadoop.mapred.TextInputFormat, outputFormat:org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat, compressed:false, numBuckets:-1, serdeInfo:SerDeInfo(name:null, serializationLib:org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe, parameters:{serialization.format=1}), bucketCols:[], sortCols:[], parameters:{}), partitionKeys:[FieldSchema(name:pt, type:string, comment:null)], parameters:{EXTERNAL=TRUE, last_modified_by=tianzhao, last_modified_time=1314540223, transient_lastDdlTime=1314540299}, viewOriginalText:null, viewExpandedText:null, tableType:MANAGED_TABLE)

// commit the changes
success = msdb.commitTransaction();
} catch (InvalidObjectException e) {
LOG.debug(e);
throw new InvalidOperationException(
"Unable to change partition or table."
+ " Check metastore logs for detailed stack." + e.getMessage());
} finally {
if (!success) {
msdb.rollbackTransaction();
}
if (success && moveData) {
// change the file name in hdfs
// check that src exists otherwise there is no need to copy the data
try {
if (srcFs.exists(srcPath)) {
// rename the src to destination
srcFs.rename(srcPath, destPath);
}
} catch (IOException e) {
throw new InvalidOperationException("Unable to access old location "
+ srcPath + " for table " + dbname + "." + name);
}
}
}

}
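The least obvious piece above is the partition-location rewrite performed during a rename. A standalone sketch of that URI manipulation (the old values are from the example table, the new name is hypothetical):

import java.net.URI;
import java.net.URISyntaxException;

public class PartitionLocationRewriteSketch {
    // replace the old table path inside a partition location while keeping the
    // scheme/host/port of the URI intact (mirrors the loop in alterTable above)
    static URI rewrite(String oldPartLoc, String oldTblPath, String newTblPath)
            throws URISyntaxException {
        URI old = new URI(oldPartLoc);
        return new URI(old.getScheme(), old.getUserInfo(), old.getHost(), old.getPort(),
                old.getPath().replace(oldTblPath, newTblPath), old.getQuery(), old.getFragment());
    }

    public static void main(String[] args) throws URISyntaxException {
        System.out.println(rewrite(
                "hdfs://localhost:54310/user/hive/warehouse/tablepartition/pt=1",
                "/user/hive/warehouse/tablepartition",
                "/user/hive/warehouse/tablepartition_renamed")); // hypothetical new name
    }
}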

ObjectStore:
public void alterTable(String dbname, String name, Table newTable)
throws InvalidObjectException, MetaException {
boolean success = false;
try {
openTransaction();
name = name.toLowerCase();
dbname = dbname.toLowerCase();
MTable newt = convertToMTable(newTable); // convertToMTable has already switched newt's tableType to EXTERNAL_TABLE
if (newt == null) {
throw new InvalidObjectException("new table is invalid");
}

MTable oldt = getMTable(dbname, name);
if (oldt == null) {
throw new MetaException("table " + name + " doesn't exist");
}

// For now only alter name, owner, paramters, cols, bucketcols are allowed
oldt.setTableName(newt.getTableName().toLowerCase());
oldt.setParameters(newt.getParameters());
oldt.setOwner(newt.getOwner());
oldt.setSd(newt.getSd());
oldt.setDatabase(newt.getDatabase());
oldt.setRetention(newt.getRetention());
oldt.setPartitionKeys(newt.getPartitionKeys());
oldt.setTableType(newt.getTableType());

// commit the changes
success = commitTransaction();
} finally {
if (!success) {
rollbackTransaction();
}
}
}

ObjectStore:
private MTable convertToMTable(Table tbl) throws InvalidObjectException,
MetaException {
if (tbl == null) {
return null;
}
MDatabase mdb = null;
try {
mdb = getMDatabase(tbl.getDbName());
} catch (NoSuchObjectException e) {
LOG.error(StringUtils.stringifyException(e));
throw new InvalidObjectException("Database " + tbl.getDbName()
+ " doesn't exist.");
}

// If the table has property EXTERNAL set, update table type
// accordingly
String tableType = tbl.getTableType();
boolean isExternal = "TRUE".equals(tbl.getParameters().get("EXTERNAL"));
if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
if (isExternal) {
tableType = TableType.EXTERNAL_TABLE.toString();
}
}
if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
if (!isExternal) {
tableType = TableType.MANAGED_TABLE.toString();
}
}

return new MTable(tbl.getTableName().toLowerCase(), mdb,
convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl
.getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
tbl.getViewOriginalText(), tbl.getViewExpandedText(),
tableType);
}